From: Cédric Le Goater
Subject: Re: [PATCH v4 12/14] vfio/migration: Block migration with vIOMMU
Date: Tue, 7 Mar 2023 11:22:24 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.8.0

On 3/7/23 03:02, Joao Martins wrote:
Migrating with a vIOMMU enabled will require either tracking the maximum
address space the IOMMU supports (e.g. a 39/48-bit address width on Intel),
or range-tracking the current mappings and dirty-tracking any new mappings
created after dirty tracking has started. That work will be done as a
separate series, so add a live migration blocker until it is in place.
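
For a rough sense of what the first option implies, here is a minimal,
self-contained C sketch -- illustrative only, not part of this patch -- that
prints the IOVA span a "track the whole vIOMMU address space" approach would
have to dirty-track for the 39/48-bit Intel widths mentioned above:

/*
 * Illustrative only: size of the IOVA range that device dirty tracking
 * would need to cover if the whole vIOMMU-advertised address space were
 * tracked, for the common Intel VT-d address widths.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const unsigned int widths[] = { 39, 48 };

    for (size_t i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
        uint64_t span = UINT64_C(1) << widths[i];   /* bytes of IOVA space */

        printf("aw=%u bits -> IOVA 0x0..0x%" PRIx64 " (%" PRIu64 " GiB)\n",
               widths[i], span - 1, span >> 30);
    }
    return 0;
}

That is a single 512 GiB range for a 39-bit vIOMMU and 256 TiB (262144 GiB)
for 48 bits.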

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>

Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.


---
  hw/vfio/common.c              | 46 +++++++++++++++++++++++++++++++++++
  hw/vfio/migration.c           |  5 ++++
  hw/vfio/pci.c                 |  1 +
  include/hw/vfio/vfio-common.h |  2 ++
  4 files changed, 54 insertions(+)

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 75b4902bbcc9..7278baa82f7d 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -362,6 +362,7 @@ bool vfio_mig_active(void)
  }
 
 static Error *multiple_devices_migration_blocker;
+static Error *giommu_migration_blocker;
 
 static unsigned int vfio_migratable_device_num(void)
  {
@@ -413,6 +414,51 @@ void vfio_unblock_multiple_devices_migration(void)
      multiple_devices_migration_blocker = NULL;
  }
 
+static bool vfio_viommu_preset(void)
+{
+    VFIOAddressSpace *space;
+
+    QLIST_FOREACH(space, &vfio_address_spaces, list) {
+        if (space->as != &address_space_memory) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+int vfio_block_giommu_migration(Error **errp)
+{
+    int ret;
+
+    if (giommu_migration_blocker ||
+        !vfio_viommu_preset()) {
+        return 0;
+    }
+
+    error_setg(&giommu_migration_blocker,
+               "Migration is currently not supported with vIOMMU enabled");
+    ret = migrate_add_blocker(giommu_migration_blocker, errp);
+    if (ret < 0) {
+        error_free(giommu_migration_blocker);
+        giommu_migration_blocker = NULL;
+    }
+
+    return ret;
+}
+
+void vfio_unblock_giommu_migration(void)
+{
+    if (!giommu_migration_blocker ||
+        vfio_viommu_preset()) {
+        return;
+    }
+
+    migrate_del_blocker(giommu_migration_blocker);
+    error_free(giommu_migration_blocker);
+    giommu_migration_blocker = NULL;
+}
+
  static void vfio_set_migration_error(int err)
  {
      MigrationState *ms = migrate_get_current();
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index a2c3d9bade7f..776fd2d7cdf3 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -634,6 +634,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
          return ret;
      }
 
+    ret = vfio_block_giommu_migration(errp);
+    if (ret) {
+        return ret;
+    }
+
      trace_vfio_migration_probe(vbasedev->name);
 
      return 0;
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 939dcc3d4a9e..30a271eab38c 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3185,6 +3185,7 @@ static void vfio_instance_finalize(Object *obj)
       */
      vfio_put_device(vdev);
      vfio_put_group(group);
+    vfio_unblock_giommu_migration();
  }
 
 static void vfio_exitfn(PCIDevice *pdev)
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 7817ca7d8706..63f93ab54811 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -235,6 +235,8 @@ extern VFIOGroupList vfio_group_list;
  bool vfio_mig_active(void);
  int vfio_block_multiple_devices_migration(Error **errp);
  void vfio_unblock_multiple_devices_migration(void);
+int vfio_block_giommu_migration(Error **errp);
+void vfio_unblock_giommu_migration(void);
  int64_t vfio_mig_bytes_transferred(void);
 
 #ifdef CONFIG_LINUX



