
Re: [Qemu-devel] [RFC v2 2/8] virtio: memory cache for packed ring


From: Maxime Coquelin
Subject: Re: [Qemu-devel] [RFC v2 2/8] virtio: memory cache for packed ring
Date: Thu, 20 Sep 2018 16:18:18 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.9.1



On 06/05/2018 09:07 PM, address@hidden wrote:
From: Wei Xu <address@hidden>

Mostly reuse memory cache with 1.0 except for the offset calculation.

Signed-off-by: Wei Xu <address@hidden>
---
  hw/virtio/virtio.c | 29 ++++++++++++++++++++---------
  1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index e192a9a..f6c0689 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -150,11 +150,8 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
      VRingMemoryRegionCaches *old = vq->vring.caches;
      VRingMemoryRegionCaches *new;
      hwaddr addr, size;
-    int event_size;
      int64_t len;
-    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
-
      addr = vq->vring.desc;
      if (!addr) {
          return;
@@ -168,7 +165,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
          goto err_desc;
      }

In the case of the packed ring layout, the region cache for the
descriptors has to be initialized as writable, as the descriptors are
written back by the device.

The following patch fixes the assert I'm facing, but we might want to
differentiate the split and packed cases, as the descriptor area
remains read-only in the split case:

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 012c0925f2..4b165aaf2c 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -173,7 +173,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
     new = g_new0(VRingMemoryRegionCaches, 1);
     size = virtio_queue_get_desc_size(vdev, n);
     len = address_space_cache_init(&new->desc, vdev->dma_as,
-                                   addr, size, false);
+                                   addr, size, true);
     if (len < size) {
         virtio_error(vdev, "Cannot map desc");
         goto err_desc;
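
As a rough sketch of that differentiation (not from the posted patch,
and assuming VIRTIO_F_RING_PACKED is the feature bit negotiated for the
packed layout, as used later in this patch), the writability could be
derived from the ring layout instead of being hard-coded:

    /* Sketch: map the descriptor area writable only for the packed
     * layout, keeping the split case read-only as before. */
    bool desc_writable = virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED);

    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, desc_writable);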


-    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    size = virtio_queue_get_used_size(vdev, n);
      len = address_space_cache_init(&new->used, vdev->dma_as,
                                     vq->vring.used, size, true);
      if (len < size) {
@@ -176,7 +173,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
          goto err_used;
      }
-    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    size = virtio_queue_get_avail_size(vdev, n);
      len = address_space_cache_init(&new->avail, vdev->dma_as,
                                     vq->vring.avail, size, false);
      if (len < size) {
@@ -2320,14 +2317,28 @@ hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
  {
-    return offsetof(VRingAvail, ring) +
-        sizeof(uint16_t) * vdev->vq[n].vring.num;
+    int s;
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return sizeof(struct VRingPackedDescEvent);
+    } else {
+        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        return offsetof(VRingAvail, ring) +
+            sizeof(uint16_t) * vdev->vq[n].vring.num + s;
+    }
  }
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
  {
-    return offsetof(VRingUsed, ring) +
-        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
+    int s;
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return sizeof(struct VRingPackedDescEvent);
+    } else {
+        s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        return offsetof(VRingUsed, ring) +
+            sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
+    }
  }
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
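
One remark on the sizes above: for the packed layout, both helpers
return sizeof(struct VRingPackedDescEvent), since the avail and used
rings are replaced by the driver and device event suppression areas.
Assuming the structure defined earlier in the series follows the
virtio 1.1 layout, it is simply:

    struct VRingPackedDescEvent {
        uint16_t off_wrap; /* descriptor event offset and wrap counter */
        uint16_t flags;    /* descriptor event flags */
    };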



