From: Jason Wang
Subject: Re: [PATCH v4 6/7] vhost-vdpa: change name and polarity for vhost_vdpa_one_time_request()
Date: Tue, 10 May 2022 14:14:27 +0800

On Sat, May 7, 2022 at 10:28 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> The name vhost_vdpa_one_time_request() was confusing: regardless
> of what it returns, its typical use has always been in requests
> that only need to be applied once, and the name didn't suggest
> what it actually checks for. Change it to vhost_vdpa_first_dev()
> with the polarity flipped for better code readability, so that
> the name reflects what the check is really about.
>
> This call applies to requests that perform an operation only once,
> before the queues are set up, and usually at the beginning of the
> caller function. Document this requirement in place.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>

Acked-by: Jason Wang <jasowang@redhat.com>
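
For readers skimming the archive: the polarity flip at a typical
one-time call site (vhost_vdpa_set_owner(), taken from the hunks
quoted below) reads roughly like this, illustration only:

    /* before: true on every vhost_dev except the first one */
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    /* after: bail out unless this is the first vhost_dev */
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

Either form short-circuits to success on every vhost_dev except the
one whose first virtqueue index is 0, so device-wide requests such
as VHOST_SET_OWNER are issued exactly once.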

> ---
>  hw/virtio/vhost-vdpa.c | 23 +++++++++++++++--------
>  1 file changed, 15 insertions(+), 8 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 6e3dbd9..33dcaa1 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -366,11 +366,18 @@ static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
>                                      v->iova_range.last);
>  }
>
> -static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
> +/*
> + * This function is intended for requests that only need to be
> + * applied once. Typically such a request occurs at the beginning
> + * of operation, before the queues are set up. It should not be
> + * used for requests that are only performed after all queues are
> + * set, which would need to check dev->vq_index_end instead.
> + */
> +static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
>  {
>      struct vhost_vdpa *v = dev->opaque;
>
> -    return v->index != 0;
> +    return v->index == 0;
>  }
>
>  static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
> @@ -451,7 +458,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
>
>      vhost_vdpa_get_iova_range(v);
>
> -    if (vhost_vdpa_one_time_request(dev)) {
> +    if (!vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
>
> @@ -594,7 +601,7 @@ static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
>  static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
>                                      struct vhost_memory *mem)
>  {
> -    if (vhost_vdpa_one_time_request(dev)) {
> +    if (!vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
>
> @@ -623,7 +630,7 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
>      struct vhost_vdpa *v = dev->opaque;
>      int ret;
>
> -    if (vhost_vdpa_one_time_request(dev)) {
> +    if (!vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
>
> @@ -665,7 +672,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
>
>      features &= f;
>
> -    if (!vhost_vdpa_one_time_request(dev)) {
> +    if (vhost_vdpa_first_dev(dev)) {
>          r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
>          if (r) {
>              return -EFAULT;
> @@ -1118,7 +1125,7 @@ static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
>                                       struct vhost_log *log)
>  {
>      struct vhost_vdpa *v = dev->opaque;
> -    if (v->shadow_vqs_enabled || vhost_vdpa_one_time_request(dev)) {
> +    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
>
> @@ -1240,7 +1247,7 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
>
>  static int vhost_vdpa_set_owner(struct vhost_dev *dev)
>  {
> -    if (vhost_vdpa_one_time_request(dev)) {
> +    if (!vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
>
> --
> 1.8.3.1
>
