From: Jason Wang
Subject: Re: [PATCH v4 2/2] vhost-vdpa: add support for vIOMMU
Date: Thu, 27 Oct 2022 16:10:58 +0800
On Thu, Oct 27, 2022 at 3:41 PM Cindy Lu <lulu@redhat.com> wrote:
>
> Add support for vIOMMU. Add new functions to deal with IOMMU MRs:
> - during iommu_region_add, register a dedicated IOMMU notifier and
>   store it in a list of all notifiers.
> - during iommu_region_del, look up the matching IOMMU notifier in the
>   list and delete it.
>
> Verified with the vp_vdpa and vdpa_sim_net drivers.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
(some nits, see below)
> ---
> hw/virtio/vhost-vdpa.c | 131 ++++++++++++++++++++++++++++++---
> include/hw/virtio/vhost-vdpa.h | 10 +++
> 2 files changed, 130 insertions(+), 11 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 3ff9ce3501..407f3e9ac2 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -26,6 +26,7 @@
> #include "cpu.h"
> #include "trace.h"
> #include "qapi/error.h"
> +#include "hw/virtio/virtio-access.h"
>
> /*
> * Return one past the end of the section. Be careful with uint64_t
> @@ -44,7 +45,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> uint64_t iova_min,
> uint64_t iova_max)
> {
> - Int128 llend;
>
> if ((!memory_region_is_ram(section->mr) &&
> !memory_region_is_iommu(section->mr)) ||
> @@ -61,14 +61,6 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
> return true;
> }
>
> - llend = vhost_vdpa_section_end(section);
> - if (int128_gt(llend, int128_make64(iova_max))) {
> - error_report("RAM section out of device range (max=0x%" PRIx64
> - ", end addr=0x%" PRIx64 ")",
> - iova_max, int128_get64(llend));
> - return true;
> - }
> -
> return false;
> }
>
> @@ -173,6 +165,115 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
> v->iotlb_batch_begin_sent = false;
> }
>
> +static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
> +{
> + struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
> +
> + hwaddr iova = iotlb->iova + iommu->iommu_offset;
> + struct vhost_vdpa *v = iommu->dev;
> + void *vaddr;
> + int ret;
> +
> + if (iotlb->target_as != &address_space_memory) {
> + error_report("Wrong target AS \"%s\", only system memory is allowed",
> + iotlb->target_as->name ? iotlb->target_as->name : "none");
> + return;
> + }
> + RCU_READ_LOCK_GUARD();
> + vhost_vdpa_iotlb_batch_begin_once(v);
> +
> + if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
> + bool read_only;
> +
> + if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only,
> + &address_space_memory)) {
> + return;
> + }
> + ret =
> + vhost_vdpa_dma_map(v, iova, iotlb->addr_mask + 1, vaddr, read_only);
> + if (ret) {
> + error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
> + "0x%" HWADDR_PRIx ", %p) = %d (%m)",
> + v, iova, iotlb->addr_mask + 1, vaddr, ret);
> + }
> + } else {
> + ret = vhost_vdpa_dma_unmap(v, iova, iotlb->addr_mask + 1);
> + if (ret) {
> + error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
> + "0x%" HWADDR_PRIx ") = %d (%m)",
> + v, iova, iotlb->addr_mask + 1, ret);
> + }
> + }
> +}
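
To make the flow concrete for other readers: the vIOMMU invokes this
notifier with an IOMMUTLBEntry describing the changed translation; an
entry with any access permission takes the map path above, an
IOMMU_NONE entry the unmap path. A purely illustrative map event (all
addresses made up) would look like:

    /* Hypothetical 4 KiB map event as a vIOMMU might deliver it */
    IOMMUTLBEntry entry = {
        .target_as       = &address_space_memory,
        .iova            = 0x100000,   /* IOVA in the guest's DMA space */
        .translated_addr = 0x200000,   /* guest-physical address */
        .addr_mask       = 0xfff,      /* covers one 4 KiB page */
        .perm            = IOMMU_RW,
    };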
> +
> +static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
> + MemoryRegionSection *section)
> +{
> + struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> +
> + struct vdpa_iommu *iommu;
> + Int128 end;
> + int iommu_idx;
> + IOMMUMemoryRegion *iommu_mr;
> + int ret;
> +
> + if (!memory_region_is_iommu(section->mr)) {
> + return;
Nit: the caller already performs this check, so there's no need to
check twice. (This could be done on top.)
> + }
> +
> + iommu_mr = IOMMU_MEMORY_REGION(section->mr);
> +
> + iommu = g_malloc0(sizeof(*iommu));
> + end = int128_add(int128_make64(section->offset_within_region),
> + section->size);
> + end = int128_sub(end, int128_one());
> + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
> + MEMTXATTRS_UNSPECIFIED);
> +
> + iommu->iommu_mr = iommu_mr;
> +
> + iommu_notifier_init(
> + &iommu->n, vhost_vdpa_iommu_map_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
> + section->offset_within_region, int128_get64(end), iommu_idx);
> + iommu->iommu_offset =
> + section->offset_within_address_space - section->offset_within_region;
> + iommu->dev = v;
> +
> + ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
> + if (ret) {
> + g_free(iommu);
> + return;
> + }
> +
> + QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
> + memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
> +
> + return;
> +}
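
For reference, IOMMU_NOTIFIER_IOTLB_EVENTS in memory.h covers both map
and unmap notifications:

    #define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

and the memory_region_iommu_replay() call at the end walks the region
and fires the new notifier for already-established translations, so the
device does not miss mappings created before this section was added.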
> +
> +static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
> + MemoryRegionSection *section)
> +{
> + struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
> +
> + struct vdpa_iommu *iommu;
> +
> + if (!memory_region_is_iommu(section->mr)) {
> + return;
Ditto.
Thanks
> + }
> +
> + QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
> + {
> + if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
> + iommu->n.start == section->offset_within_region) {
> + memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
> + QLIST_REMOVE(iommu, iommu_next);
> + g_free(iommu);
> + break;
> + }
> + }
> +}
> +
> static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> MemoryRegionSection *section)
> {
> @@ -186,6 +287,10 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
> v->iova_range.last)) {
> return;
> }
> + if (memory_region_is_iommu(section->mr)) {
> + vhost_vdpa_iommu_region_add(listener, section);
> + return;
> + }
>
> if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
> (section->offset_within_region & ~TARGET_PAGE_MASK))) {
> @@ -260,6 +365,10 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
> v->iova_range.last)) {
> return;
> }
> + if (memory_region_is_iommu(section->mr)) {
> + vhost_vdpa_iommu_region_del(listener, section);
> + return;
> + }
>
> if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
> (section->offset_within_region & ~TARGET_PAGE_MASK))) {
> @@ -587,7 +696,6 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
> v = dev->opaque;
> trace_vhost_vdpa_cleanup(dev, v);
> vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> - memory_listener_unregister(&v->listener);
> vhost_vdpa_svq_cleanup(dev);
>
> dev->opaque = NULL;
> @@ -1127,7 +1235,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> }
>
> if (started) {
> - memory_listener_register(&v->listener, &address_space_memory);
> + memory_listener_register(&v->listener, dev->vdev->dma_as);
> +
> return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> } else {
> vhost_vdpa_reset_device(dev);
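
Registering on dev->vdev->dma_as is what makes the IOMMU paths above
reachable: behind a vIOMMU the device's DMA address space is the
IOMMU's, not plain system memory, so the listener now sees IOMMU MRs.
Conceptually (a sketch of the idea only, not the exact code):

    /* virtio_get_dma_as() yields the IOMMU address space when the
     * device is behind a vIOMMU, address_space_memory otherwise. */
    AddressSpace *as = virtio_get_dma_as(dev->vdev);
    memory_listener_register(&v->listener, as);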
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index d10a89303e..64a46e37cb 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -41,8 +41,18 @@ typedef struct vhost_vdpa {
> void *shadow_vq_ops_opaque;
> struct vhost_dev *dev;
> VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
> + QLIST_HEAD(, vdpa_iommu) iommu_list;
> + IOMMUNotifier n;
> } VhostVDPA;
>
> +struct vdpa_iommu {
> + struct vhost_vdpa *dev;
> + IOMMUMemoryRegion *iommu_mr;
> + hwaddr iommu_offset;
> + IOMMUNotifier n;
> + QLIST_ENTRY(vdpa_iommu) iommu_next;
> +};
> +
> int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
> void *vaddr, bool readonly);
> int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size);
> --
> 2.34.3
>