[Qemu-devel] [RFC PATCH qemu v3 3/4] vfio: Store IOMMU type in container


From: Alexey Kardashevskiy
Subject: [Qemu-devel] [RFC PATCH qemu v3 3/4] vfio: Store IOMMU type in container
Date: Tue, 14 Jul 2015 22:21:53 +1000

So far we have managed not to store the IOMMU type anywhere, but since
we are going to implement different behavior for different IOMMU types
in the same memory listener, we need to know the IOMMU type after
initialization.

This adds an IOMMU type field to VFIOContainer and initializes it. It also
adds a SPAPR IOMMU struct to the iommu_data union; for now it only embeds
the existing Type1 data struct. Since zero is not used for any IOMMU type,
no additional initialization of the new VFIOContainer::iommu_data::type
field is necessary.

This reworks vfio_listener_region_add() in order to prepare it to
handle RAM regions on IOMMUs other than Type1/Type1v2.

This should cause no behavioral change.
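
As a quick illustration (not part of the patch), the dispatch pattern the
reworked listener ends up with can be modelled by the standalone sketch
below: the container remembers which IOMMU type was negotiated, and the
region_add path switches on it. The FAKE_* constants, the fake_container
struct and listener_region_add() are stand-ins for this sketch only, not
the real VFIO/QEMU definitions.

/* Standalone model of "store the type once, dispatch on it later". */
#include <stdio.h>

enum { FAKE_TYPE1_IOMMU = 1, FAKE_TYPE1v2_IOMMU = 3, FAKE_SPAPR_TCE_IOMMU = 7 };

struct fake_container {
    unsigned type;   /* 0 means "not set"; no real type uses 0 */
};

static void listener_region_add(struct fake_container *c)
{
    switch (c->type) {
    case FAKE_TYPE1_IOMMU:
    case FAKE_TYPE1v2_IOMMU:
        /* Type1/Type1v2: map the RAM region (vfio_dma_map() in the patch) */
        printf("Type1: map RAM region\n");
        break;
    case FAKE_SPAPR_TCE_IOMMU:
        /* later patches can hook SPAPR-specific RAM handling here */
        printf("SPAPR TCE: nothing to do yet\n");
        break;
    }
}

int main(void)
{
    struct fake_container c = { .type = FAKE_TYPE1v2_IOMMU };
    listener_region_add(&c);
    return 0;
}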

Signed-off-by: Alexey Kardashevskiy <address@hidden>
---
Changes:
v3:
* folded vfio_listener_region_add() change into this

v2:
* added VFIOContainer::iommu_data::spapr
---
 hw/vfio/common.c              | 55 ++++++++++++++++++++++++++-----------------
 include/hw/vfio/vfio-common.h |  6 +++++
 2 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index aad41e1..6982b8f 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -393,26 +393,36 @@ static void vfio_listener_region_add(VFIOContainer *container,
             section->offset_within_region +
             (iova - section->offset_within_address_space);
 
-    trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);
+    switch (container->iommu_data.type) {
+    case VFIO_TYPE1_IOMMU:
+    case VFIO_TYPE1v2_IOMMU:
+        trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);
 
-    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
-    if (ret) {
-        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
-                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
-                     container, iova, end - iova, vaddr, ret);
+        ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
+        if (ret) {
+            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
+                         container, iova, end - iova, vaddr, ret);
+            goto error_exit;
+        }
+        break;
+    }
+
+    return;
+
+error_exit:
 
-        /*
-         * On the initfn path, store the first error in the container so we
-         * can gracefully fail.  Runtime, there's not much we can do other
-         * than throw a hardware error.
-         */
-        if (!container->iommu_data.type1.initialized) {
-            if (!container->iommu_data.type1.error) {
-                container->iommu_data.type1.error = ret;
-            }
-        } else {
-            hw_error("vfio: DMA mapping failed, unable to continue");
+    /*
+     * On the initfn path, store the first error in the container so we
+     * can gracefully fail.  Runtime, there's not much we can do other
+     * than throw a hardware error.
+     */
+    if (!container->iommu_data.type1.initialized) {
+        if (!container->iommu_data.type1.error) {
+            container->iommu_data.type1.error = ret;
         }
+    } else {
+        hw_error("vfio: DMA mapping failed, unable to continue");
     }
 }
 
@@ -733,8 +743,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
             goto free_container_exit;
         }
 
-        ret = ioctl(fd, VFIO_SET_IOMMU,
-                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
+        container->iommu_data.type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
+        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_data.type);
         if (ret) {
             error_report("vfio: failed to set iommu for container: %m");
             ret = -errno;
@@ -762,7 +772,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
             ret = -errno;
             goto free_container_exit;
         }
-        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
+        container->iommu_data.type = VFIO_SPAPR_TCE_IOMMU;
+        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_data.type);
         if (ret) {
             error_report("vfio: failed to set iommu for container: %m");
             ret = -errno;
@@ -781,10 +792,10 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
             goto free_container_exit;
         }
 
-        container->iommu_data.type1.listener = vfio_spapr_iommu_listener;
+        container->iommu_data.spapr.common.listener = vfio_spapr_iommu_listener;
         container->iommu_data.release = vfio_listener_release;
 
-        memory_listener_register(&container->iommu_data.type1.listener,
+        memory_listener_register(&container->iommu_data.spapr.common.listener,
                                  container->space->as);
 
     } else {
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 59a321d..135ea64 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -70,13 +70,19 @@ typedef struct VFIOType1 {
     bool initialized;
 } VFIOType1;
 
+typedef struct VFIOSPAPR {
+    VFIOType1 common;
+} VFIOSPAPR;
+
 typedef struct VFIOContainer {
     VFIOAddressSpace *space;
     int fd; /* /dev/vfio/vfio, empowered by the attached groups */
     struct {
         /* enable abstraction to support various iommu backends */
+        unsigned type;
         union {
             VFIOType1 type1;
+            VFIOSPAPR spapr;
         };
         void (*release)(struct VFIOContainer *);
     } iommu_data;
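
A side note on the header change above: because VFIOSPAPR embeds VFIOType1
as its first member, the union views iommu_data.type1 and
iommu_data.spapr.common overlay the same storage in practice, which is why
code that still goes through the type1 member keeps working for a SPAPR
container. The sketch below is a standalone illustration of that layout
with simplified struct and field names, not the real definitions.

/* Standalone model of the union overlay; relies on the common layout of
 * the two union members, which compilers honor in practice. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Type1 {
    int error;
    bool initialized;
} Type1;

typedef struct Spapr {
    Type1 common;       /* first member, so it overlays Type1 exactly */
} Spapr;

struct iommu_data {
    unsigned type;
    union {
        Type1 type1;
        Spapr spapr;
    };
};

int main(void)
{
    struct iommu_data d = { 0 };

    d.spapr.common.error = -22;        /* written through the SPAPR view */
    assert(d.type1.error == -22);      /* visible through the Type1 view */
    assert(offsetof(struct iommu_data, type1) ==
           offsetof(struct iommu_data, spapr.common));
    return 0;
}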
-- 
2.4.0.rc3.8.gfb3e7d5
