Re: [PATCH 9.0 01/13] vdpa: add VhostVDPAShared


From: Eugenio Perez Martin
Subject: Re: [PATCH 9.0 01/13] vdpa: add VhostVDPAShared
Date: Fri, 1 Dec 2023 07:41:47 +0100

On Fri, Dec 1, 2023 at 6:35 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Sat, Nov 25, 2023 at 1:14 AM Eugenio Pérez <eperezma@redhat.com> wrote:
> >
> > It will hold properties shared among all vhost_vdpa instances associated
> > with the same device.  For example, we just need one iova_tree or one
> > memory listener for the entire device.
> >
> > Next patches will register the vhost_vdpa memory listener at the
> > beginning of the VM migration at the destination. This enables QEMU to
> > map the memory to the device before stopping the VM at the source,
> > instead of doing so while both source and destination are stopped, thus
> > minimizing the downtime.
> >
> > However, the destination QEMU is unaware of which vhost_vdpa struct will
> > register its memory_listener.  If the source guest has CVQ enabled, it
> > will be the one associated with the CVQ.  Otherwise, it will be the
> > first one.
> >
> > Save the memory-operation-related members in a common place rather than
> > always in the first / last vhost_vdpa.
>
> Great.
>
> Patch looks good but I think we probably need a better name like
> VhostVDPAParent?
>

Sure, I'm ok with the renaming. I'll change it for v2.

Thanks!
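To make the commit message above a bit more concrete: this patch only adds
the (still empty) struct, and the follow-up patches of the series move the
per-device members into it. Roughly, and just as a sketch (the exact fields
land in later patches), the end state looks like:

/*
 * Sketch only: per-device state that later patches are expected to move
 * out of struct vhost_vdpa, so there is a single copy per device instead
 * of one per virtqueue pair.
 */
typedef struct vhost_vdpa_shared {
    MemoryListener listener;   /* one memory listener for the whole device */
    VhostIOVATree *iova_tree;  /* one IOVA tree (SVQ mappings) per device */
} VhostVDPAShared;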

> And it would be better in the future if we can convert it to QOM.
>
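Just to sketch what a QOM conversion could look like down the road (all
names below are placeholders, none of this is part of this series):

#include "qemu/osdep.h"
#include "qom/object.h"

/* Placeholder sketch of a possible QOM type, not part of this series */
#define TYPE_VHOST_VDPA_PARENT "vhost-vdpa-parent"
OBJECT_DECLARE_SIMPLE_TYPE(VhostVDPAParent, VHOST_VDPA_PARENT)

struct VhostVDPAParent {
    Object parent_obj;
    /* shared members (iova_tree, listener, ...) would live here */
};

static const TypeInfo vhost_vdpa_parent_info = {
    .name          = TYPE_VHOST_VDPA_PARENT,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(VhostVDPAParent),
};

static void vhost_vdpa_parent_register_types(void)
{
    type_register_static(&vhost_vdpa_parent_info);
}

type_init(vhost_vdpa_parent_register_types)

The main gain would be lifetime management: the last user could drop the
struct with object_unref() instead of the explicit g_free() in
vhost_vdpa_cleanup().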
> Thanks
>
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  include/hw/virtio/vhost-vdpa.h |  5 +++++
> >  net/vhost-vdpa.c               | 24 ++++++++++++++++++++++--
> >  2 files changed, 27 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> > index 5407d54fd7..eb1a56d75a 100644
> > --- a/include/hw/virtio/vhost-vdpa.h
> > +++ b/include/hw/virtio/vhost-vdpa.h
> > @@ -30,6 +30,10 @@ typedef struct VhostVDPAHostNotifier {
> >      void *addr;
> >  } VhostVDPAHostNotifier;
> >
> > +/* Info shared by all vhost_vdpa device models */
> > +typedef struct vhost_vdpa_shared {
> > +} VhostVDPAShared;
> > +
> >  typedef struct vhost_vdpa {
> >      int device_fd;
> >      int index;
> > @@ -46,6 +50,7 @@ typedef struct vhost_vdpa {
> >      bool suspended;
> >      /* IOVA mapping used by the Shadow Virtqueue */
> >      VhostIOVATree *iova_tree;
> > +    VhostVDPAShared *shared;
> >      GPtrArray *shadow_vqs;
> >      const VhostShadowVirtqueueOps *shadow_vq_ops;
> >      void *shadow_vq_ops_opaque;
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index d0614d7954..8b661b9e6d 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -240,6 +240,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
> >          qemu_close(s->vhost_vdpa.device_fd);
> >          s->vhost_vdpa.device_fd = -1;
> >      }
> > +    if (s->vhost_vdpa.index != 0) {
> > +        return;
> > +    }
> > +    g_free(s->vhost_vdpa.shared);
> >  }
> >
> >  /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend  */
> > @@ -1661,6 +1665,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >                                         bool svq,
> >                                         struct vhost_vdpa_iova_range iova_range,
> >                                         uint64_t features,
> > +                                       VhostVDPAShared *shared,
> >                                         Error **errp)
> >  {
> >      NetClientState *nc = NULL;
> > @@ -1696,6 +1701,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >      if (queue_pair_index == 0) {
> >          vhost_vdpa_net_valid_svq_features(features,
> >                                            &s->vhost_vdpa.migration_blocker);
> > +        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
> >      } else if (!is_datapath) {
> >          s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
> >                                       PROT_READ | PROT_WRITE,
> > @@ -1708,11 +1714,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >          s->vhost_vdpa.shadow_vq_ops_opaque = s;
> >          s->cvq_isolated = cvq_isolated;
> >      }
> > +    if (queue_pair_index != 0) {
> > +        s->vhost_vdpa.shared = shared;
> > +    }
> > +
> >      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
> >      if (ret) {
> >          qemu_del_net_client(nc);
> >          return NULL;
> >      }
> > +
> >      return nc;
> >  }
> >
> > @@ -1824,17 +1835,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> >
> >      for (i = 0; i < queue_pairs; i++) {
> > +        VhostVDPAShared *shared = NULL;
> > +
> > +        if (i) {
> > +            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
> > +        }
> >          ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >                                      vdpa_device_fd, i, 2, true, opts->x_svq,
> > -                                     iova_range, features, errp);
> > +                                     iova_range, features, shared, errp);
> >          if (!ncs[i])
> >              goto err;
> >      }
> >
> >      if (has_cvq) {
> > +        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
> > +        VhostVDPAShared *shared = s0->vhost_vdpa.shared;
> > +
> >          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >                                   vdpa_device_fd, i, 1, false,
> > -                                 opts->x_svq, iova_range, features, errp);
> > +                                 opts->x_svq, iova_range, features, shared,
> > +                                 errp);
> >          if (!nc)
> >              goto err;
> >      }
> > --
> > 2.39.3
> >
>



