Re: [PATCH v2] vhost-vdpa: Do not send empty IOTLB update batches


From: Eugenio Perez Martin
Subject: Re: [PATCH v2] vhost-vdpa: Do not send empty IOTLB update batches
Date: Thu, 12 Aug 2021 08:43:12 +0200

On Thu, Aug 12, 2021 at 8:16 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Aug 12, 2021 at 12:32 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > With the introduction of the batch hinting, meaningless batches with no
> > IOTLB updates can be created if every memory region in the transaction
> > was skipped by vhost_vdpa_listener_skipped_section. This is the case
> > for host notifier memory regions, device un/realize, and others. It
> > causes the vdpa device to receive DMA mapping settings with no changes,
> > a potentially expensive operation for nothing.
> >
> > To avoid that, the VHOST_IOTLB_BATCH_BEGIN hint is delayed until there
> > is a meaningful (not skipped) mapping or unmapping operation, and
> > VHOST_IOTLB_BATCH_END is not written unless at least one _UPDATE /
> > _INVALIDATE has been issued.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  include/hw/virtio/vhost-vdpa.h |  1 +
> >  hw/virtio/vhost-vdpa.c         | 50 ++++++++++++++++++++++++++--------
> >  2 files changed, 39 insertions(+), 12 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > index e98e327f12..6538572a6f 100644
> > --- a/include/hw/virtio/vhost-vdpa.h
> > +++ b/include/hw/virtio/vhost-vdpa.h
> > @@ -23,6 +23,7 @@ typedef struct vhost_vdpa {
> >      int device_fd;
> >      int index;
> >      uint32_t msg_type;
> > +    size_t n_mr_updated;
> >      MemoryListener listener;
> >      struct vhost_dev *dev;
> >      VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 6ce94a1f4d..512fa18d68 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -89,19 +89,13 @@ static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
> >      return ret;
> >  }
> >
> > -static void vhost_vdpa_listener_begin(MemoryListener *listener)
> > +static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
> >  {
> > -    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> > -    struct vhost_dev *dev = v->dev;
> > -    struct vhost_msg_v2 msg = {};
> >      int fd = v->device_fd;
> > -
> > -    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
> > -        return;
> > -    }
> > -
> > -    msg.type = v->msg_type;
> > -    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
> > +    struct vhost_msg_v2 msg = {
> > +        .type = v->msg_type,
> > +        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
> > +    };
> >
> >      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
> >          error_report("failed to write, fd=%d, errno=%d (%s)",
> > @@ -109,6 +103,25 @@ static void vhost_vdpa_listener_begin(MemoryListener *listener)
> >      }
> >  }
> >
> > +static bool vhost_vdpa_iotlb_batch_is_started(const struct vhost_vdpa *v)
> > +{
> > +    return v->n_mr_updated != 0;
> > +}
> > +
> > +static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
> > +{
> > +    if (!vhost_vdpa_iotlb_batch_is_started(v)) {
> > +        vhost_vdpa_listener_begin_batch(v);
> > +    }
> > +
> > +    v->n_mr_updated++;
> > +}
> > +
> > +static void vhost_vdpa_iotlb_batch_reset(struct vhost_vdpa *v)
> > +{
> > +    v->n_mr_updated = 0;
> > +}
> > +
> >  static void vhost_vdpa_listener_commit(MemoryListener *listener)
> >  {
> >      struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> > @@ -120,6 +133,10 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
> >          return;
> >      }
> >
> > +    if (!vhost_vdpa_iotlb_batch_is_started(v)) {
> > +        return;
> > +    }
> > +
> >      msg.type = v->msg_type;
> >      msg.iotlb.type = VHOST_IOTLB_BATCH_END;
> >
> > @@ -127,6 +144,8 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
> >          error_report("failed to write, fd=%d, errno=%d (%s)",
> >                       fd, errno, strerror(errno));
> >      }
> > +
> > +    vhost_vdpa_iotlb_batch_reset(v);
> >  }
> >
> >  static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> > @@ -170,6 +189,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> >
> >      llsize = int128_sub(llend, int128_make64(iova));
> >
> > +    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) {
>
> Let's move this into vhost_vdpa_iotlb_batch_begin_once()?
>

Sure
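
For reference, a rough sketch of how that fold could look, so the callers
below can invoke the helper unconditionally (hypothetical, just to
illustrate the idea; the respin may differ):

    /* Hypothetical: emit VHOST_IOTLB_BATCH_BEGIN only when the backend
     * negotiated batching and no batch is in flight yet. */
    static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
    {
        if ((v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) &&
            !vhost_vdpa_iotlb_batch_is_started(v)) {
            vhost_vdpa_listener_begin_batch(v);
        }

        v->n_mr_updated++;
    }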

> > +        vhost_vdpa_iotlb_batch_begin_once(v);
> > +    }
> > +
> >      ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
> >                               vaddr, section->readonly);
> >      if (ret) {
> > @@ -221,6 +244,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> >
> >      llsize = int128_sub(llend, int128_make64(iova));
> >
> > +    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) {
> > +        vhost_vdpa_iotlb_batch_begin_once(v);
> > +    }
> > +
>
> Do we need to check vhost_vdpa_iotlb_batch_is_started() in the .commit?
>

I don't follow you here. Is that comment in this position of the
patch for a reason?

That check is what allows QEMU to skip the IOTLB_END write.
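
To make that concrete, the intended .commit path after this patch is
roughly (a sketch paraphrasing the diff above):

    /* Nothing to close if batching was not negotiated. */
    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    /* If no _UPDATE/_INVALIDATE was batched, no BATCH_BEGIN was sent
     * either, so the BATCH_END write is skipped entirely. */
    if (!vhost_vdpa_iotlb_batch_is_started(v)) {
        return;
    }

    /* ... otherwise write VHOST_IOTLB_BATCH_END and reset the counter. */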

> Others look good.
>
> Thanks
>
> >      ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
> >      if (ret) {
> >          error_report("vhost_vdpa dma unmap error!");
> > @@ -234,7 +261,6 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> >   * depends on the addnop().
> >   */
> >  static const MemoryListener vhost_vdpa_memory_listener = {
> > -    .begin = vhost_vdpa_listener_begin,
> >      .commit = vhost_vdpa_listener_commit,
> >      .region_add = vhost_vdpa_listener_region_add,
> >      .region_del = vhost_vdpa_listener_region_del,
> > --
> > 2.27.0
> >
>
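
Putting the commit message and the diff together, with batching negotiated
the messages written to the vhost-vdpa device fd change roughly like this
(a sketch of the intended flow, not taken from a trace):

    /* Before the patch, every memory transaction opened and closed a
     * batch, even when all sections were skipped:
     *     BATCH_BEGIN
     *     BATCH_END            <- empty batch, pointless work for the device
     *
     * After the patch, nothing is written unless at least one mapping
     * change is actually batched:
     *     BATCH_BEGIN          <- sent lazily, on the first real map/unmap
     *     UPDATE/INVALIDATE    <- one per added/removed section
     *     BATCH_END            <- written only if BATCH_BEGIN was sent
     */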



