
Re: [RFC v3 13/29] vhost: Add vhost_get_iova_range operation


From: Jason Wang
Subject: Re: [RFC v3 13/29] vhost: Add vhost_get_iova_range operation
Date: Wed, 26 May 2021 09:14:04 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Thunderbird/78.10.2


On 2021/5/20 12:28 AM, Eugenio Pérez wrote:
For simplicity, if a device does not support this operation, it means
that it can handle the full (uint64_t)-1 iova address range.


Note that we probably need a separate patch for this.

And we need to do this during vhost-vdpa initialization. If the GPA is out of the range, we need to fail the start of vhost-vdpa.

Thanks
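
To make that concrete, here is a rough sketch of the kind of check this implies. The helper name, its call site, and the way the regions are walked below are illustrative only, not part of this series:

    /*
     * Sketch only: validate that every guest memory region fits in the
     * IOVA range reported by the device, and fail vhost-vdpa start if not.
     */
    static int vhost_vdpa_check_iova_range(struct vhost_dev *dev)
    {
        hwaddr first, last;
        int i, r;

        r = dev->vhost_ops->vhost_get_iova_range(dev, &first, &last);
        if (r < 0) {
            /* No range reported: assume the device covers the full space. */
            first = 0;
            last = (hwaddr)-1;
        }

        for (i = 0; i < dev->mem->nregions; i++) {
            struct vhost_memory_region *reg = &dev->mem->regions[i];

            if (reg->guest_phys_addr < first ||
                reg->guest_phys_addr + reg->memory_size - 1 > last) {
                return -EINVAL; /* caller fails the start of vhost-vdpa */
            }
        }

        return 0;
    }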



Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
  include/hw/virtio/vhost-backend.h |  5 +++++
  hw/virtio/vhost-vdpa.c            | 18 ++++++++++++++++++
  hw/virtio/trace-events            |  1 +
  3 files changed, 24 insertions(+)

diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 94d3323905..bcb112c166 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -36,6 +36,7 @@ struct vhost_vring_addr;
  struct vhost_scsi_target;
  struct vhost_iotlb_msg;
  struct vhost_virtqueue;
+struct vhost_vdpa_iova_range;
typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
  typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
@@ -127,6 +128,9 @@ typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
  typedef int (*vhost_vring_pause_op)(struct vhost_dev *dev);
+typedef int (*vhost_get_iova_range)(struct vhost_dev *dev,
+                                    hwaddr *first, hwaddr *last);
+
  typedef struct VhostOps {
      VhostBackendType backend_type;
      vhost_backend_init vhost_backend_init;
@@ -173,6 +177,7 @@ typedef struct VhostOps {
      vhost_get_device_id_op vhost_get_device_id;
      vhost_vring_pause_op vhost_vring_pause;
      vhost_force_iommu_op vhost_force_iommu;
+    vhost_get_iova_range vhost_get_iova_range;
  } VhostOps;
extern const VhostOps user_ops;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 01d2101d09..74fe92935e 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -579,6 +579,23 @@ static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
      return true;
  }
+static int vhost_vdpa_get_iova_range(struct vhost_dev *dev,
+                                     hwaddr *first, hwaddr *last)
+{
+    int ret;
+    struct vhost_vdpa_iova_range range;
+
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_IOVA_RANGE, &range);
+    if (ret != 0) {
+        return ret;
+    }
+
+    *first = range.first;
+    *last = range.last;
+    trace_vhost_vdpa_get_iova_range(dev, *first, *last);
+    return ret;
+}
+
  const VhostOps vdpa_ops = {
          .backend_type = VHOST_BACKEND_TYPE_VDPA,
          .vhost_backend_init = vhost_vdpa_init,
@@ -611,4 +628,5 @@ const VhostOps vdpa_ops = {
          .vhost_get_device_id = vhost_vdpa_get_device_id,
          .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
          .vhost_force_iommu = vhost_vdpa_force_iommu,
+        .vhost_get_iova_range = vhost_vdpa_get_iova_range,
  };
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index c62727f879..5debe3a681 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -52,6 +52,7 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index:
  vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
  vhost_vdpa_set_owner(void *dev) "dev: %p"
  vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
+vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64
# virtio.c
  virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"



