qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH Kernel v19 3/8] vfio iommu: Cache pgsize_bitmap in struct vfio_iommu


From: Kirti Wankhede
Subject: [PATCH Kernel v19 3/8] vfio iommu: Cache pgsize_bitmap in struct vfio_iommu
Date: Thu, 14 May 2020 01:34:34 +0530

Calculate and cache pgsize_bitmap when iommu->domain_list is updated.
Add iommu->lock protection when cached pgsize_bitmap is accessed.

Signed-off-by: Kirti Wankhede <address@hidden>
Reviewed-by: Neo Jia <address@hidden>
---
 drivers/vfio/vfio_iommu_type1.c | 87 +++++++++++++++++++++++------------------
 1 file changed, 48 insertions(+), 39 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index fa735047b04d..6f09fbabed12 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -69,6 +69,7 @@ struct vfio_iommu {
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
        unsigned int            dma_avail;
+       uint64_t                pgsize_bitmap;
        bool                    v2;
        bool                    nesting;
 };
@@ -805,15 +806,14 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        iommu->dma_avail++;
 }
 
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 {
        struct vfio_domain *domain;
-       unsigned long bitmap = ULONG_MAX;
 
-       mutex_lock(&iommu->lock);
+       iommu->pgsize_bitmap = ULONG_MAX;
+
        list_for_each_entry(domain, &iommu->domain_list, next)
-               bitmap &= domain->domain->pgsize_bitmap;
-       mutex_unlock(&iommu->lock);
+               iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
 
        /*
         * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -823,12 +823,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
         * granularity while iommu driver can use the sub-PAGE_SIZE size
         * to map the buffer.
         */
-       if (bitmap & ~PAGE_MASK) {
-               bitmap &= PAGE_MASK;
-               bitmap |= PAGE_SIZE;
+       if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+               iommu->pgsize_bitmap &= PAGE_MASK;
+               iommu->pgsize_bitmap |= PAGE_SIZE;
        }
-
-       return bitmap;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -839,19 +837,28 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
        size_t unmapped = 0;
        int ret = 0, retries = 0;
 
-       mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+       mutex_lock(&iommu->lock);
+
+       mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
+
+       if (unmap->iova & mask) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       if (!unmap->size || unmap->size & mask) {
+               ret = -EINVAL;
+               goto unlock;
+       }
 
-       if (unmap->iova & mask)
-               return -EINVAL;
-       if (!unmap->size || unmap->size & mask)
-               return -EINVAL;
        if (unmap->iova + unmap->size - 1 < unmap->iova ||
-           unmap->size > SIZE_MAX)
-               return -EINVAL;
+           unmap->size > SIZE_MAX) {
+               ret = -EINVAL;
+               goto unlock;
+       }
 
        WARN_ON(mask & PAGE_MASK);
 again:
-       mutex_lock(&iommu->lock);
 
        /*
         * vfio-iommu-type1 (v1) - User mappings were coalesced together to
@@ -930,6 +937,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
                        blocking_notifier_call_chain(&iommu->notifier,
                                                    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
                                                    &nb_unmap);
+                       mutex_lock(&iommu->lock);
                        goto again;
                }
                unmapped += dma->size;
@@ -1045,24 +1053,28 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        if (map->size != size || map->vaddr != vaddr || map->iova != iova)
                return -EINVAL;
 
-       mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
-       WARN_ON(mask & PAGE_MASK);
-
        /* READ/WRITE from device perspective */
        if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
                prot |= IOMMU_WRITE;
        if (map->flags & VFIO_DMA_MAP_FLAG_READ)
                prot |= IOMMU_READ;
 
-       if (!prot || !size || (size | iova | vaddr) & mask)
-               return -EINVAL;
+       mutex_lock(&iommu->lock);
 
-       /* Don't allow IOVA or virtual address wrap */
-       if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
-               return -EINVAL;
+       mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
 
-       mutex_lock(&iommu->lock);
+       WARN_ON(mask & PAGE_MASK);
+
+       if (!prot || !size || (size | iova | vaddr) & mask) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       /* Don't allow IOVA or virtual address wrap */
+       if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
 
        if (vfio_find_dma(iommu, iova, size)) {
                ret = -EEXIST;
@@ -1793,6 +1805,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        }
 
        list_add(&domain->next, &iommu->domain_list);
+       vfio_pgsize_bitmap(iommu);
 done:
        /* Delete the old one and insert new iova list */
        vfio_iommu_iova_insert_copy(iommu, &iova_copy);
@@ -2004,6 +2017,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
                        list_del(&domain->next);
                        kfree(domain);
                        vfio_iommu_aper_expand(iommu, &iova_copy);
+                       vfio_pgsize_bitmap(iommu);
                }
                break;
        }
@@ -2136,8 +2150,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
        size_t size;
        int iovas = 0, i = 0, ret;
 
-       mutex_lock(&iommu->lock);
-
        list_for_each_entry(iova, &iommu->iova_list, list)
                iovas++;
 
@@ -2146,17 +2158,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
                 * Return 0 as a container with a single mdev device
                 * will have an empty list
                 */
-               ret = 0;
-               goto out_unlock;
+               return 0;
        }
 
        size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
 
        cap_iovas = kzalloc(size, GFP_KERNEL);
-       if (!cap_iovas) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!cap_iovas)
+               return -ENOMEM;
 
        cap_iovas->nr_iovas = iovas;
 
@@ -2169,8 +2178,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
        ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
 
        kfree(cap_iovas);
-out_unlock:
-       mutex_unlock(&iommu->lock);
        return ret;
 }
 
@@ -2215,11 +2222,13 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
                        info.cap_offset = 0; /* output, no-recopy necessary */
                }
 
+               mutex_lock(&iommu->lock);
                info.flags = VFIO_IOMMU_INFO_PGSIZES;
 
-               info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+               info.iova_pgsizes = iommu->pgsize_bitmap;
 
                ret = vfio_iommu_iova_build_caps(iommu, &caps);
+               mutex_unlock(&iommu->lock);
                if (ret)
                        return ret;
 
-- 
2.7.0




reply via email to

[Prev in Thread] Current Thread [Next in Thread]