From: Jason Wang
Subject: Re: [PATCH 13/40] vdpa: ref counting VhostVDPAShared
Date: Thu, 11 Jan 2024 16:12:24 +0800

On Fri, Dec 8, 2023 at 2:50 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Subsequent patches attempt to release VhostVDPAShared resources in
> vdpa_dev_cleanup(), for example freeing the iova tree and
> unregistering the memory listener. Instead of checking against the vq
> index, which is not available in all of the callers, count the usage
> with a reference count. Then it'll be easy to free the resources upon
> the last deref.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  include/hw/virtio/vhost-vdpa.h |  2 ++
>  net/vhost-vdpa.c               | 14 ++++++++++----
>  2 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 63493ff..7b8d3bf 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -70,6 +70,8 @@ typedef struct vhost_vdpa_shared {
>
>      /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
>      bool shadow_data;
> +
> +    unsigned refcnt;
>  } VhostVDPAShared;
>
>  typedef struct vhost_vdpa {
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index aebaa53..a126e5c 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -236,11 +236,11 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
>          g_free(s->vhost_net);
>          s->vhost_net = NULL;
>      }
> -    if (s->vhost_vdpa.index != 0) {
> -        return;
> +    if (--s->vhost_vdpa.shared->refcnt == 0) {
> +        qemu_close(s->vhost_vdpa.shared->device_fd);
> +        g_free(s->vhost_vdpa.shared);
>      }

I'd suggest adding get and put helpers; then the put helper can check
the refcnt and do the cleanup when it drops to zero.
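
For example (a rough sketch only; the helper names
vhost_vdpa_shared_get()/vhost_vdpa_shared_put() are illustrative, not
from the patch):

static VhostVDPAShared *vhost_vdpa_shared_get(VhostVDPAShared *shared)
{
    /* Take a reference; the caller owns it until the matching put. */
    shared->refcnt++;
    return shared;
}

static void vhost_vdpa_shared_put(VhostVDPAShared *shared)
{
    /* Drop a reference; release everything on the last one. */
    if (--shared->refcnt == 0) {
        qemu_close(shared->device_fd);
        g_free(shared);
    }
}

Then the open-coded --refcnt checks in vhost_vdpa_cleanup() and the
err path collapse into put calls, and the refcnt handling lives in
one place.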

Thanks

> -    qemu_close(s->vhost_vdpa.shared->device_fd);
> -    g_free(s->vhost_vdpa.shared);
> +    s->vhost_vdpa.shared = NULL;
>  }
>
>  /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend  */
> @@ -1896,6 +1896,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>          s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
>          s->vhost_vdpa.shared->iova_range = iova_range;
>          s->vhost_vdpa.shared->shadow_data = svq;
> +        s->vhost_vdpa.shared->refcnt++;
>      } else if (!is_datapath) {
>          s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
>                                       PROT_READ | PROT_WRITE,
> @@ -1910,6 +1911,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      }
>      if (queue_pair_index != 0) {
>          s->vhost_vdpa.shared = shared;
> +        s->vhost_vdpa.shared->refcnt++;
>      }
>
>      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
> @@ -1928,6 +1930,10 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      return nc;
>
>  err:
> +    if (--s->vhost_vdpa.shared->refcnt == 0) {
> +        g_free(s->vhost_vdpa.shared);
> +    }
> +    s->vhost_vdpa.shared = NULL;
>      qemu_del_net_client(nc);
>      return NULL;
>  }
> --
> 1.8.3.1
>