From: Si-Wei Liu
Subject: [PATCH 17/40] vdpa: judge if map can be kept across reset
Date: Thu, 7 Dec 2023 09:39:30 -0800

The descriptor group for the SVQ ASID allows the guest memory mapping
to be retained across SVQ switching, just as an isolated CVQ can do
with an ASID different from the guest GPA space. Introduce an
evaluation function that judges whether to flush or keep the IOTLB
maps based on the virtqueue's descriptor group and the CVQ isolation
capability.
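
For illustration, a minimal standalone sketch of the resulting flush
decision for a data vq; the struct below uses plain-C stand-ins for
the relevant VhostVDPAState fields and is not QEMU code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the fields the patch consults. */
    struct vq_flush_state {
        bool always_svq;    /* SVQ unconditionally on (x-svq=on) */
        bool svq_switching; /* an SVQ <-> passthrough switch in flight */
        int desc_group;     /* descriptor area's group; < 0 if none */
    };

    /* Maps must be flushed unless no switch is in flight, or the
     * descriptor area lives in its own group (separate ASID), which
     * leaves the guest GPA mappings untouched by the switch. */
    static bool must_flush_map(const struct vq_flush_state *s)
    {
        if (s->always_svq) {
            return true;
        }
        return s->svq_switching && s->desc_group < 0;
    }

    int main(void)
    {
        struct vq_flush_state own_group = {
            .always_svq = false, .svq_switching = true, .desc_group = 1,
        };
        struct vq_flush_state shared_group = {
            .always_svq = false, .svq_switching = true, .desc_group = -1,
        };
        printf("own desc group during switch: flush=%d\n",
               must_flush_map(&own_group));    /* 0: maps kept */
        printf("shared group during switch:   flush=%d\n",
               must_flush_map(&shared_group)); /* 1: must flush */
        return 0;
    }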

The evaluation function has to be hooked to NetClient's .poll op,
because .vhost_reset_status runs ahead of .stop, and .vhost_dev_start
doesn't have access to the vhost-vdpa net's information.
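
To make that ordering concrete, here is a runnable toy that mirrors
the sequence implied above: the flush decision is recorded in .poll
before reset tears state down, and .stop comes too late. All names
are hypothetical stand-ins, not the QEMU API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool flush_map; /* stand-in for v->shared->flush_map */

    static void poll_cb(bool stop)
    {
        if (!stop) {
            return;
        }
        /* Decision recorded while per-vq state is still alive. */
        flush_map = false;
        puts("1. .poll(stop=true): flush decision recorded");
    }

    static void reset_status(void)
    {
        printf("2. .vhost_reset_status: %s maps\n",
               flush_map ? "flushing" : "keeping");
    }

    static void stop_cb(void)
    {
        puts("3. .stop: too late to inspect per-vq state");
    }

    int main(void)
    {
        poll_cb(true);
        reset_status();
        stop_cb();
        return 0;
    }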

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 net/vhost-vdpa.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 04718b2..e9b96ed 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -504,12 +504,36 @@ static int vhost_vdpa_net_load_cleanup(NetClientState *nc, NICState *nic)
                              n->parent_obj.status & VIRTIO_CONFIG_S_DRIVER_OK);
 }
 
+static void vhost_vdpa_net_data_eval_flush(NetClientState *nc, bool stop)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (!stop) {
+        return;
+    }
+
+    if (s->vhost_vdpa.index == 0) {
+        if (s->always_svq) {
+            v->shared->flush_map = true;
+        } else if (!v->shared->svq_switching || v->desc_group >= 0) {
+            v->shared->flush_map = false;
+        } else {
+            v->shared->flush_map = true;
+        }
+    } else if (!s->always_svq && v->shared->svq_switching &&
+               v->desc_group < 0) {
+        v->shared->flush_map = true;
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
         .start = vhost_vdpa_net_data_start,
         .load = vhost_vdpa_net_data_load,
+        .poll = vhost_vdpa_net_data_eval_flush,
         .stop = vhost_vdpa_net_client_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
@@ -1368,12 +1392,28 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
     return 0;
 }
 
+static void vhost_vdpa_net_cvq_eval_flush(NetClientState *nc, bool stop)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (!stop) {
+        return;
+    }
+
+    if (!v->shared->flush_map && !v->shared->svq_switching &&
+        !s->cvq_isolated && v->desc_group < 0) {
+        v->shared->flush_map = true;
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_cvq_info = {
     .type = NET_CLIENT_DRIVER_VHOST_VDPA,
     .size = sizeof(VhostVDPAState),
     .receive = vhost_vdpa_receive,
     .start = vhost_vdpa_net_cvq_start,
     .load = vhost_vdpa_net_cvq_load,
+    .poll = vhost_vdpa_net_cvq_eval_flush,
     .stop = vhost_vdpa_net_cvq_stop,
     .cleanup = vhost_vdpa_cleanup,
     .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
-- 
1.8.3.1