[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 18/23] vfio-user: add dma_unmap_all
From: Alex Williamson
Subject: Re: [PATCH v2 18/23] vfio-user: add dma_unmap_all
Date: Mon, 6 Feb 2023 14:29:04 -0700
On Wed, 1 Feb 2023 21:55:54 -0800
John Johnson <john.g.johnson@oracle.com> wrote:
> Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
> ---
> include/hw/vfio/vfio-common.h | 1 +
> hw/vfio/common.c | 45
> ++++++++++++++++++++++++++++++++++---------
> hw/vfio/user.c | 24 +++++++++++++++++++++++
> 3 files changed, 61 insertions(+), 9 deletions(-)
This is inventing a need for a kernel vs user callback op rather than
probing support for VFIO_UNMAP_ALL and using VFIO_DMA_UNMAP_FLAG_VADDR
consistently. The flags arg of the dma_unmap_all() op is unused and
the kernel implementation introduces hard coded magic values. The
vfio-user dirty_bitmap addition here is rather random. Please include
some sort of description in the commit log for all patches. Thanks,
Alex
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index ee6ad8f..abef9b4 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -193,6 +193,7 @@ struct VFIOContainerIO {
> int (*dma_unmap)(VFIOContainer *container,
> struct vfio_iommu_type1_dma_unmap *unmap,
> struct vfio_bitmap *bitmap);
> + int (*dma_unmap_all)(VFIOContainer *container, uint32_t flags);
> int (*dirty_bitmap)(VFIOContainer *container,
> struct vfio_iommu_type1_dirty_bitmap *bitmap,
> struct vfio_iommu_type1_dirty_bitmap_get *range);
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index f04fd20..8b55fbb 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -508,6 +508,14 @@ static int vfio_dma_unmap(VFIOContainer *container,
> return container->io->dma_unmap(container, &unmap, NULL);
> }
>
> +/*
> + * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
> + */
> +static int vfio_dma_unmap_all(VFIOContainer *container)
> +{
> + return container->io->dma_unmap_all(container, VFIO_DMA_UNMAP_FLAG_ALL);
> +}
> +
> static int vfio_dma_map(VFIOContainer *container, MemoryRegion *mr, hwaddr
> iova,
> ram_addr_t size, void *vaddr, bool readonly)
> {
> @@ -1256,17 +1264,10 @@ static void vfio_listener_region_del(MemoryListener
> *listener,
>
> if (try_unmap) {
> if (int128_eq(llsize, int128_2_64())) {
> - /* The unmap ioctl doesn't accept a full 64-bit span. */
> - llsize = int128_rshift(llsize, 1);
> + ret = vfio_dma_unmap_all(container);
> + } else {
> ret = vfio_dma_unmap(container, iova, int128_get64(llsize),
> NULL);
> - if (ret) {
> - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
> - "0x%"HWADDR_PRIx") = %d (%m)",
> - container, iova, int128_get64(llsize), ret);
> - }
> - iova += int128_get64(llsize);
> }
> - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
> if (ret) {
> error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
> "0x%"HWADDR_PRIx") = %d (%m)",
> @@ -2867,6 +2868,31 @@ static int vfio_io_dma_unmap(VFIOContainer *container,
> return 0;
> }
>
> +static int vfio_io_dma_unmap_all(VFIOContainer *container, uint32_t flags)
> +{
> + struct vfio_iommu_type1_dma_unmap unmap = {
> + .argsz = sizeof(unmap),
> + .flags = 0,
> + .size = 0x8000000000000000,
> + };
> + int ret;
> +
> + /* The unmap ioctl doesn't accept a full 64-bit span. */
> + unmap.iova = 0;
> + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
> + if (ret) {
> + return -errno;
> + }
> +
> + unmap.iova += unmap.size;
> + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
> + if (ret) {
> + return -errno;
> + }
> +
> + return 0;
> +}
> +
> static int vfio_io_dirty_bitmap(VFIOContainer *container,
> struct vfio_iommu_type1_dirty_bitmap *bitmap,
> struct vfio_iommu_type1_dirty_bitmap_get
> *range)
> @@ -2886,6 +2912,7 @@ static void vfio_io_wait_commit(VFIOContainer
> *container)
> static VFIOContainerIO vfio_cont_io_ioctl = {
> .dma_map = vfio_io_dma_map,
> .dma_unmap = vfio_io_dma_unmap,
> + .dma_unmap_all = vfio_io_dma_unmap_all,
> .dirty_bitmap = vfio_io_dirty_bitmap,
> .wait_commit = vfio_io_wait_commit,
> };
> diff --git a/hw/vfio/user.c b/hw/vfio/user.c
> index 6dee775..fe6e476 100644
> --- a/hw/vfio/user.c
> +++ b/hw/vfio/user.c
> @@ -1825,6 +1825,28 @@ static int vfio_user_io_dma_unmap(VFIOContainer
> *container,
> container->async_ops);
> }
>
> +static int vfio_user_io_dma_unmap_all(VFIOContainer *container, uint32_t
> flags)
> +{
> + struct vfio_iommu_type1_dma_unmap unmap = {
> + .argsz = sizeof(unmap),
> + .flags = flags | VFIO_DMA_UNMAP_FLAG_ALL,
> + .iova = 0,
> + .size = 0,
> + };
> +
> + return vfio_user_dma_unmap(container->proxy, &unmap, NULL,
> + container->async_ops);
> +}
> +
> +static int vfio_user_io_dirty_bitmap(VFIOContainer *container,
> + struct vfio_iommu_type1_dirty_bitmap *bitmap,
> + struct vfio_iommu_type1_dirty_bitmap_get *range)
> +{
> +
> + /* vfio-user doesn't support migration */
> + return -EINVAL;
> +}
> +
> static void vfio_user_io_wait_commit(VFIOContainer *container)
> {
> vfio_user_wait_reqs(container->proxy);
> @@ -1833,5 +1855,7 @@ static void vfio_user_io_wait_commit(VFIOContainer
> *container)
> VFIOContainerIO vfio_cont_io_sock = {
> .dma_map = vfio_user_io_dma_map,
> .dma_unmap = vfio_user_io_dma_unmap,
> + .dma_unmap_all = vfio_user_io_dma_unmap_all,
> + .dirty_bitmap = vfio_user_io_dirty_bitmap,
> .wait_commit = vfio_user_io_wait_commit,
> };
- Re: [PATCH v2 17/23] vfio-user: dma map/unmap operations, (continued)
[PATCH v2 14/23] vfio-user: get and set IRQs, John Johnson, 2023/02/02
[PATCH v2 18/23] vfio-user: add dma_unmap_all, John Johnson, 2023/02/02
- Re: [PATCH v2 18/23] vfio-user: add dma_unmap_all, Alex Williamson <=
[PATCH v2 23/23] vfio-user: add coalesced posted writes, John Johnson, 2023/02/02
[PATCH v2 19/23] vfio-user: no-mmap DMA support, John Johnson, 2023/02/02
[PATCH v2 20/23] vfio-user: dma read/write operations, John Johnson, 2023/02/02