[RFC PATCH v9 09/23] vhost: Add SVQElement
From: Eugenio Pérez
Subject: [RFC PATCH v9 09/23] vhost: Add SVQElement
Date: Wed, 6 Jul 2022 20:39:54 +0200
This allows SVQ to attach metadata to the different queue elements. To
keep the change simple, this patch only stores the actual element;
later patches in the series add the metadata itself.
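As an illustration only, not part of this patch: the wrapper is meant to
grow per-element metadata next to the stored element. The members sketched
below are assumptions drawn from later patches in this series (which move
the last descriptor chain id into SVQElement and add an opaque member):

    typedef struct SVQElement {
        /* The guest's element; the only member as of this patch. */
        VirtQueueElement *elem;
        /*
         * Per-element metadata goes here in later patches, e.g. the
         * last descriptor chain id and an opaque pointer.
         */
    } SVQElement;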
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.h | 8 ++++--
hw/virtio/vhost-shadow-virtqueue.c | 41 ++++++++++++++++++++----------
2 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 0fbdd69153..e434dc63b0 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,10 @@
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/vhost-iova-tree.h"
+typedef struct SVQElement {
+ VirtQueueElement *elem;
+} SVQElement;
+
typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
typedef int (*ShadowVirtQueueStart)(VhostShadowVirtqueue *svq,
void *opaque);
@@ -55,8 +59,8 @@ typedef struct VhostShadowVirtqueue {
/* IOVA mapping */
VhostIOVATree *iova_tree;
- /* Map for use the guest's descriptors */
- VirtQueueElement **ring_id_maps;
+ /* Each element's context */
+ SVQElement *ring_id_maps;
/* Next VirtQueue element that guest made available */
VirtQueueElement *next_guest_avail_elem;
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 46d3c1d74f..913bca8769 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -237,7 +237,7 @@ static bool vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return false;
}
- svq->ring_id_maps[qemu_head] = elem;
+ svq->ring_id_maps[qemu_head].elem = elem;
return true;
}
@@ -385,15 +385,25 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
return i;
}
-static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
- uint32_t *len)
+static bool vhost_svq_is_empty_elem(SVQElement elem)
+{
+ return elem.elem == NULL;
+}
+
+static SVQElement vhost_svq_empty_elem(void)
+{
+ return (SVQElement){};
+}
+
+static SVQElement vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len)
{
const vring_used_t *used = svq->vring.used;
vring_used_elem_t used_elem;
+ SVQElement svq_elem = vhost_svq_empty_elem();
uint16_t last_used, last_used_chain, num;
if (!vhost_svq_more_used(svq)) {
- return NULL;
+ return svq_elem;
}
/* Only get used array entries after they have been exposed by dev */
@@ -406,24 +416,25 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
if (unlikely(used_elem.id >= svq->vring.num)) {
qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
svq->vdev->name, used_elem.id);
- return NULL;
+ return svq_elem;
}
- if (unlikely(!svq->ring_id_maps[used_elem.id])) {
+ svq_elem = svq->ring_id_maps[used_elem.id];
+ svq->ring_id_maps[used_elem.id] = vhost_svq_empty_elem();
+ if (unlikely(vhost_svq_is_empty_elem(svq_elem))) {
qemu_log_mask(LOG_GUEST_ERROR,
"Device %s says index %u is used, but it was not available",
svq->vdev->name, used_elem.id);
- return NULL;
+ return svq_elem;
}
- num = svq->ring_id_maps[used_elem.id]->in_num +
- svq->ring_id_maps[used_elem.id]->out_num;
+ num = svq_elem.elem->in_num + svq_elem.elem->out_num;
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
*len = used_elem.len;
- return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
+ return svq_elem;
}
/**
@@ -454,6 +465,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
vhost_svq_disable_notification(svq);
while (true) {
uint32_t len;
+ SVQElement svq_elem;
g_autofree VirtQueueElement *elem = NULL;
if (unlikely(i >= svq->vring.num)) {
@@ -464,11 +476,12 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
return;
}
- elem = vhost_svq_get_buf(svq, &len);
- if (!elem) {
+ svq_elem = vhost_svq_get_buf(svq, &len);
+ if (vhost_svq_is_empty_elem(svq_elem)) {
break;
}
+ elem = g_steal_pointer(&svq_elem.elem);
virtqueue_fill(vq, elem, len, i++);
}
@@ -611,7 +624,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
memset(svq->vring.desc, 0, driver_size);
svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
memset(svq->vring.used, 0, device_size);
- svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+ svq->ring_id_maps = g_new0(SVQElement, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
svq->desc_next[i] = cpu_to_le16(i + 1);
@@ -636,7 +649,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
for (unsigned i = 0; i < svq->vring.num; ++i) {
g_autofree VirtQueueElement *elem = NULL;
- elem = g_steal_pointer(&svq->ring_id_maps[i]);
+ elem = g_steal_pointer(&svq->ring_id_maps[i].elem);
if (elem) {
virtqueue_detach_element(svq->vq, elem, 0);
}
--
2.31.1
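Not part of the patch: a self-contained sketch, with stand-in types, of the
by-value "empty element" convention that the new vhost_svq_get_buf() follows.
Instead of returning a VirtQueueElement pointer with NULL meaning "nothing
used", the function returns an SVQElement by value and the caller tests the
wrapped pointer; taking an element also resets the ring slot, which the old
code achieved with g_steal_pointer() on the pointer array.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the QEMU types; only the shape matters here. */
    typedef struct VirtQueueElement { unsigned in_num, out_num; } VirtQueueElement;
    typedef struct SVQElement { VirtQueueElement *elem; } SVQElement;

    /* A zero-initialized SVQElement means "no element": elem == NULL. */
    static SVQElement svq_empty_elem(void) { return (SVQElement){}; }
    static bool svq_is_empty_elem(SVQElement e) { return e.elem == NULL; }

    /*
     * Take ownership of the element in a ring slot, leaving the slot
     * empty; this mirrors what vhost_svq_get_buf() does per used id.
     */
    static SVQElement svq_take(SVQElement *slot)
    {
        SVQElement taken = *slot;
        *slot = svq_empty_elem();
        return taken;
    }

    int main(void)
    {
        VirtQueueElement ve = { .in_num = 1, .out_num = 2 };
        SVQElement ring[1] = { { .elem = &ve } };
        SVQElement got = svq_take(&ring[0]);

        printf("got element: %s, slot empty: %s\n",
               svq_is_empty_elem(got) ? "no" : "yes",
               svq_is_empty_elem(ring[0]) ? "yes" : "no");
        return 0;
    }

Returning the wrapper by value keeps the flush loop simple once SVQElement
carries more than one member: a single struct copy moves the element and its
future metadata together, and an all-zero value is a natural sentinel.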