qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC v3 28/29] vhost-vdpa: never map with vDPA listener


From: Eugenio Pérez
Subject: [RFC v3 28/29] vhost-vdpa: never map with vDPA listener
Date: Wed, 19 May 2021 18:29:02 +0200

This commit is a workaround that will not go to the final version.

vp_vdpa is not able to reset all IOTLBs, so we avoid mapping them
in the first place.

Checkpatch detects a few errors because of the #if 0 / #endif pairs, but
it's the least intrusive way to comment out all the code we want to skip.
Since this commit is not intended to go into the final series, I left it
that way.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost-vdpa.h | 2 +-
 hw/virtio/vhost-vdpa.c         | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 9b81a409da..06afe42ab6 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -17,7 +17,7 @@
 typedef struct vhost_vdpa {
     int device_fd;
     uint32_t msg_type;
-    MemoryListener listener;
+    /* MemoryListener listener; */
     struct vhost_dev *dev;
 } VhostVDPA;
 
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index dfb465be96..30e4e306fb 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -23,6 +23,7 @@
 #include "trace.h"
 #include "qemu-common.h"
 
+#if 0
 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
 {
     return (!memory_region_is_ram(section->mr) &&
@@ -35,6 +36,7 @@ static bool 
vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
             */
            section->offset_within_address_space & (1ULL << 63);
 }
+#endif
 
 int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                               void *vaddr, bool readonly)
@@ -62,6 +64,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, 
hwaddr size,
     return ret;
 }
 
+#if 0
 static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                 hwaddr size)
 {
@@ -246,6 +249,7 @@ static const MemoryListener vhost_vdpa_memory_listener = {
     .region_add = vhost_vdpa_listener_region_add,
     .region_del = vhost_vdpa_listener_region_del,
 };
+#endif
 
 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                              void *arg)
@@ -274,6 +278,7 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, 
uint8_t status)
 
 static int vhost_vdpa_enable_custom_iommu(struct vhost_dev *dev, bool enable)
 {
+#if 0
     struct vhost_vdpa *v = dev->opaque;
     hwaddr iova_range_last = dev->iova_range.last;
     if (iova_range_last != (hwaddr)-1) {
@@ -291,6 +296,7 @@ static int vhost_vdpa_enable_custom_iommu(struct vhost_dev 
*dev, bool enable)
         memory_listener_unregister(&v->listener);
         return vhost_vdpa_dma_unmap(v, dev->iova_range.first, iova_range_last);
     }
+#endif
 
     return 0;
 }
@@ -307,7 +313,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void 
*opaque)
     dev->opaque =  opaque ;
     vhost_vdpa_call(dev, VHOST_GET_FEATURES, &features);
     dev->backend_features = features;
-    v->listener = vhost_vdpa_memory_listener;
+    /* v->listener = vhost_vdpa_memory_listener; */
     v->msg_type = VHOST_IOTLB_MSG_V2;
 
     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
-- 
2.27.0




reply via email to

[Prev in Thread] Current Thread [Next in Thread]