From: Jason Wang
Subject: Re: [PATCH v6 09/10] vdpa: Add listener_shadow_vq to vhost_vdpa
Date: Thu, 10 Nov 2022 14:00:52 +0800

On Wed, Nov 9, 2022 at 1:08 AM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> The memory listener that tells the device how to convert GPA to
> qemu's VA is registered against CVQ vhost_vdpa. This series tries to
> map the memory listener translations to ASID 0, while it maps the CVQ
> ones to ASID 1.
>
> Let's tell the listener whether it needs to register them in the iova
> tree or not.
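
To make the distinction concrete, here is a condensed sketch of the two
translation paths the new flag selects between in
vhost_vdpa_listener_region_add() (error handling and the section offset
math are elided; names are as in the hunks below):

    if (v->listener_shadow_vq) {
        /* SVQ case: allocate an entry in the iova tree, so the device
         * ends up programmed with SVQ IOVA -> qemu VA translations. */
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
            .perm = IOMMU_ACCESS_FLAG(true, section->readonly),
        };
        if (vhost_iova_tree_map_alloc(v->iova_tree, &mem_region) != IOVA_OK) {
            return;
        }
        iova = mem_region.iova;
    }
    /* Otherwise iova keeps the guest physical address, so the device
     * is programmed with plain GPA -> qemu VA translations. Either
     * way, the region is then DMA-mapped at iova. */
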
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
> v5: Solve conflict about vhost_iova_tree_remove accepting mem_region by
>     value.
> ---
>  include/hw/virtio/vhost-vdpa.h | 2 ++
>  hw/virtio/vhost-vdpa.c         | 6 +++---
>  net/vhost-vdpa.c               | 1 +
>  3 files changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 6560bb9d78..0c3ed2d69b 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -34,6 +34,8 @@ typedef struct vhost_vdpa {
>      struct vhost_vdpa_iova_range iova_range;
>      uint64_t acked_features;
>      bool shadow_vqs_enabled;
> +    /* The listener must send iova tree addresses, not GPA */
> +    bool listener_shadow_vq;
>      /* IOVA mapping used by the Shadow Virtqueue */
>      VhostIOVATree *iova_tree;
>      GPtrArray *shadow_vqs;
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 8fd32ba32b..e3914fa40e 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -220,7 +220,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
>                                           vaddr, section->readonly);
>
>      llsize = int128_sub(llend, int128_make64(iova));
> -    if (v->shadow_vqs_enabled) {
> +    if (v->listener_shadow_vq) {
>          int r;
>
>          mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
> @@ -247,7 +247,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
>      return;
>
>  fail_map:
> -    if (v->shadow_vqs_enabled) {
> +    if (v->listener_shadow_vq) {
>          vhost_iova_tree_remove(v->iova_tree, mem_region);
>      }
>
> @@ -292,7 +292,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
>
>      llsize = int128_sub(llend, int128_make64(iova));
>
> -    if (v->shadow_vqs_enabled) {
> +    if (v->listener_shadow_vq) {
>          const DMAMap *result;
>          const void *vaddr = memory_region_get_ram_ptr(section->mr) +
>              section->offset_within_region +
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 85a318faca..02780ee37b 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -570,6 +570,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      s->vhost_vdpa.index = queue_pair_index;
>      s->always_svq = svq;
>      s->vhost_vdpa.shadow_vqs_enabled = svq;
> +    s->vhost_vdpa.listener_shadow_vq = svq;

Any chance the above two can differ?
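
From the commit message ("map the memory listener translations to ASID
0, while it maps the CVQ ones to ASID 1") it sounds like the CVQ case
should eventually end up something like this (hypothetical values, just
to check my understanding):

    /* CVQ shadowed in ASID 1, guest memory still GPA-mapped in ASID 0 */
    s->vhost_vdpa.shadow_vqs_enabled = true;
    s->vhost_vdpa.listener_shadow_vq = false;
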

Thanks

>      s->vhost_vdpa.iova_tree = iova_tree;
>      if (!is_datapath) {
>          s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
> --
> 2.31.1
>
