From: Yongji Xie
Subject: Re: [Qemu-devel] [PATCH v7 6/7] vhost-user-blk: Add support to reconnect backend
Date: Wed, 13 Mar 2019 10:05:51 +0800
On Wed, 13 Mar 2019 at 00:49, Michael S. Tsirkin <address@hidden> wrote:
>
> On Thu, Feb 28, 2019 at 04:53:54PM +0800, address@hidden wrote:
> > From: Xie Yongji <address@hidden>
> >
> > Since we now support the messages VHOST_USER_GET_INFLIGHT_FD
> > and VHOST_USER_SET_INFLIGHT_FD, the backend is able to restart
> > safely because it can track inflight I/O in shared memory.
> > This patch allows QEMU to reconnect to the backend after the
> > connection is closed.
> >
> > Signed-off-by: Xie Yongji <address@hidden>
> > Signed-off-by: Ni Xun <address@hidden>
> > Signed-off-by: Zhang Yu <address@hidden>
> > ---
> > hw/block/vhost-user-blk.c | 205 +++++++++++++++++++++++------
> > include/hw/virtio/vhost-user-blk.h | 4 +
> > 2 files changed, 167 insertions(+), 42 deletions(-)
> >
> > diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> > index 9682df1a7b..539ea2e571 100644
> > --- a/hw/block/vhost-user-blk.c
> > +++ b/hw/block/vhost-user-blk.c
> > @@ -103,7 +103,7 @@ const VhostDevConfigOps blk_ops = {
> > .vhost_dev_config_notifier = vhost_user_blk_handle_config_change,
> > };
> >
> > -static void vhost_user_blk_start(VirtIODevice *vdev)
> > +static int vhost_user_blk_start(VirtIODevice *vdev)
> > {
> > VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
> > @@ -112,13 +112,13 @@ static void vhost_user_blk_start(VirtIODevice *vdev)
> >
> > if (!k->set_guest_notifiers) {
> > error_report("binding does not support guest notifiers");
> > - return;
> > + return -ENOSYS;
> > }
> >
> > ret = vhost_dev_enable_notifiers(&s->dev, vdev);
> > if (ret < 0) {
> > error_report("Error enabling host notifiers: %d", -ret);
> > - return;
> > + return ret;
> > }
> >
> > ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
> > @@ -157,12 +157,13 @@ static void vhost_user_blk_start(VirtIODevice *vdev)
> > vhost_virtqueue_mask(&s->dev, vdev, i, false);
> > }
> >
> > - return;
> > + return ret;
> >
> > err_guest_notifiers:
> > k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
> > err_host_notifiers:
> > vhost_dev_disable_notifiers(&s->dev, vdev);
> > + return ret;
> > }
> >
> > static void vhost_user_blk_stop(VirtIODevice *vdev)
> > @@ -181,7 +182,6 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
> > ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
> > if (ret < 0) {
> > error_report("vhost guest notifier cleanup failed: %d", ret);
> > - return;
> > }
> >
> > vhost_dev_disable_notifiers(&s->dev, vdev);
> > @@ -191,21 +191,43 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
> > {
> > VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
> > + int ret;
> >
> > if (!vdev->vm_running) {
> > should_start = false;
> > }
> >
> > - if (s->dev.started == should_start) {
> > + if (s->should_start == should_start) {
> > + return;
> > + }
> > +
> > + if (!s->connected || s->dev.started == should_start) {
> > + s->should_start = should_start;
> > return;
> > }
> >
> > if (should_start) {
> > - vhost_user_blk_start(vdev);
> > + s->should_start = true;
> > + /*
> > + * make sure vhost_user_blk_handle_output() ignores fake
> > + * guest kick by vhost_dev_enable_notifiers()
> > + */
> > + barrier();
> > + ret = vhost_user_blk_start(vdev);
> > + if (ret < 0) {
> > + error_report("vhost-user-blk: vhost start failed: %s",
> > +                         strerror(-ret));
> > + qemu_chr_fe_disconnect(&s->chardev);
> > + }
> > } else {
> > vhost_user_blk_stop(vdev);
> > + /*
> > + * make sure vhost_user_blk_handle_output() ignore fake
> > + * guest kick by vhost_dev_disable_notifiers()
> > + */
> > + barrier();
> > + s->should_start = false;
> > }
> > -
> > }
> >
> > static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
> > @@ -237,13 +259,22 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
> > static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> > {
> > VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > - int i;
> > + int i, ret;
> >
> > if (!(virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) &&
> > !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1))) {
> > return;
> > }
> >
> > + if (s->should_start) {
> > + return;
> > + }
> > + s->should_start = true;
> > +
> > + if (!s->connected) {
> > + return;
> > + }
> > +
> > if (s->dev.started) {
> > return;
> > }
> > @@ -251,7 +282,13 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> > /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
> > * vhost here instead of waiting for .set_status().
> > */
> > - vhost_user_blk_start(vdev);
> > + ret = vhost_user_blk_start(vdev);
> > + if (ret < 0) {
> > + error_report("vhost-user-blk: vhost start failed: %s",
> > +                     strerror(-ret));
> > + qemu_chr_fe_disconnect(&s->chardev);
> > + return;
> > + }
> >
> > /* Kick right away to begin processing requests already in vring */
> > for (i = 0; i < s->dev.nvqs; i++) {
> > @@ -271,13 +308,106 @@ static void vhost_user_blk_reset(VirtIODevice *vdev)
> > vhost_dev_free_inflight(s->inflight);
> > }
> >
> > +static int vhost_user_blk_connect(DeviceState *dev)
> > +{
> > + VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > + VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > + int ret = 0;
> > +
> > + if (s->connected) {
> > + return 0;
> > + }
> > + s->connected = true;
> > +
> > + s->dev.nvqs = s->num_queues;
> > + s->dev.vqs = s->vqs;
> > + s->dev.vq_index = 0;
> > + s->dev.backend_features = 0;
> > +
> > + vhost_dev_set_config_notifier(&s->dev, &blk_ops);
> > +
> > +    ret = vhost_dev_init(&s->dev, s->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
> > + if (ret < 0) {
> > + error_report("vhost-user-blk: vhost initialization failed: %s",
> > +                     strerror(-ret));
> > + return ret;
> > + }
> > +
> > + /* restore vhost state */
> > + if (s->should_start) {
> > + ret = vhost_user_blk_start(vdev);
> > + if (ret < 0) {
> > + error_report("vhost-user-blk: vhost start failed: %s",
> > +                         strerror(-ret));
> > + return ret;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void vhost_user_blk_disconnect(DeviceState *dev)
> > +{
> > + VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > + VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > +
> > + if (!s->connected) {
> > + return;
> > + }
> > + s->connected = false;
> > +
> > + if (s->dev.started) {
> > + vhost_user_blk_stop(vdev);
> > + }
> > +
> > + vhost_dev_cleanup(&s->dev);
> > +}
> > +
> > +static gboolean vhost_user_blk_watch(GIOChannel *chan, GIOCondition cond,
> > + void *opaque)
> > +{
> > + DeviceState *dev = opaque;
> > + VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > + VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > +
> > + qemu_chr_fe_disconnect(&s->chardev);
> > +
> > + return true;
> > +}
> > +
> > +static void vhost_user_blk_event(void *opaque, int event)
> > +{
> > + DeviceState *dev = opaque;
> > + VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > + VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > +
> > + switch (event) {
> > + case CHR_EVENT_OPENED:
> > + if (vhost_user_blk_connect(dev) < 0) {
> > + qemu_chr_fe_disconnect(&s->chardev);
> > + return;
> > + }
> > + s->watch = qemu_chr_fe_add_watch(&s->chardev, G_IO_HUP,
> > +                                     vhost_user_blk_watch, dev);
> > + break;
> > + case CHR_EVENT_CLOSED:
> > + vhost_user_blk_disconnect(dev);
> > + if (s->watch) {
> > + g_source_remove(s->watch);
> > + s->watch = 0;
> > + }
> > + break;
> > + }
> > +}
> > +
> > +
> > static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
> > {
> > VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> > VHostUserBlk *s = VHOST_USER_BLK(vdev);
> > VhostUserState *user;
> > - struct vhost_virtqueue *vqs = NULL;
> > int i, ret;
> > + Error *err = NULL;
> >
> > if (!s->chardev.chr) {
> > error_setg(errp, "vhost-user-blk: chardev is mandatory");
> > @@ -312,27 +442,28 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
> > }
> >
> > s->inflight = g_new0(struct vhost_inflight, 1);
> > -
> > - s->dev.nvqs = s->num_queues;
> > - s->dev.vqs = g_new(struct vhost_virtqueue, s->dev.nvqs);
> > - s->dev.vq_index = 0;
> > - s->dev.backend_features = 0;
> > - vqs = s->dev.vqs;
> > -
> > - vhost_dev_set_config_notifier(&s->dev, &blk_ops);
> > -
> > -    ret = vhost_dev_init(&s->dev, s->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
> > - if (ret < 0) {
> > - error_setg(errp, "vhost-user-blk: vhost initialization failed: %s",
> > -                   strerror(-ret));
> > - goto virtio_err;
> > - }
> > + s->vqs = g_new(struct vhost_virtqueue, s->num_queues);
> > + s->watch = 0;
> > + s->should_start = false;
> > + s->connected = false;
> > +
> > +    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
> > +                             NULL, (void *)dev, NULL, true);
> > +
> > +reconnect:
> > + do {
> > + if (qemu_chr_fe_wait_connected(&s->chardev, &err) < 0) {
> > + error_report_err(err);
> > + err = NULL;
> > + sleep(1);
>
> Seems arbitrary. Is this basically waiting until backend will reconnect?
> Why not block until event on the fd triggers?
>
QEMU is the client in the current vhost-user-blk implementation, so it
seems the backend will not try to connect.
> Also, it looks like this will just block forever with no monitor input
> and no way for user to figure out what is going on short of
> crashing QEMU.
>
Yes, because I want to support the case where the backend starts after
QEMU. I can print a warning and add a timeout for this.
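
For example, a minimal sketch of that idea (the retry cap, the helper
name and the messages below are placeholders rather than part of this
series; qemu_chr_fe_wait_connected(), warn_report_err() and error_setg()
are existing QEMU APIs):

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "chardev/char-fe.h"
#include "hw/virtio/vhost-user-blk.h"

/* Hypothetical cap: warn once per second, give up after ~60 seconds */
#define VHOST_USER_BLK_CONNECT_RETRIES 60

static int vhost_user_blk_wait_for_backend(VHostUserBlk *s, Error **errp)
{
    Error *err = NULL;
    int retries = VHOST_USER_BLK_CONNECT_RETRIES;

    /* Keep waiting for the backend, but tell the user what is going on */
    while (qemu_chr_fe_wait_connected(&s->chardev, &err) < 0) {
        warn_report_err(err);
        err = NULL;
        if (--retries == 0) {
            error_setg(errp, "vhost-user-blk: timed out waiting for backend");
            return -ETIMEDOUT;
        }
        sleep(1);
    }

    return 0;
}

Realize could then call this helper in place of the open-coded
do {} while () loop and propagate the error through errp.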
Thanks,
Yongji