[PATCH v4 04/13] vdpa: move shadow_data to vhost_vdpa_shared
From: Eugenio Pérez
Subject: [PATCH v4 04/13] vdpa: move shadow_data to vhost_vdpa_shared
Date: Thu, 21 Dec 2023 18:43:13 +0100
Subsequent patches will register the vhost_vdpa memory listener while the VM
is migrating at the destination, so the memory can be mapped to the device
before the VM is stopped at the source. The main goal is to reduce the
downtime.
However, the destination QEMU is unaware of which vhost_vdpa device will
register its memory_listener. If the source guest has CVQ enabled, it
will be the CVQ device. Otherwise, it will be the first one.
Move the shadow_data member to VhostVDPAShared so every vhost_vdpa can use
it, rather than keeping it only in the first or last vhost_vdpa.
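For orientation, this is the ownership model the member moves into, as a
minimal sketch (simplified stand-in types and a hypothetical init_client()
helper for illustration only; the real structs live in
include/hw/virtio/vhost-vdpa.h and the real wiring is the
net_vhost_vdpa_init() hunk below):

#include <glib.h>
#include <stdbool.h>

/* Simplified stand-ins for the QEMU structs; only the fields this
 * patch touches are kept. */
typedef struct VhostVDPAShared {
    /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
    bool shadow_data;
} VhostVDPAShared;

typedef struct vhost_vdpa {
    unsigned index;
    bool shadow_vqs_enabled;
    VhostVDPAShared *shared;    /* same object for every queue pair */
} VhostVDPA;

/* Mirrors the net_vhost_vdpa_init() hunk below: queue pair 0 allocates
 * the shared state, and every later client points at that same object. */
static void init_client(VhostVDPA *v, unsigned queue_pair_index,
                        VhostVDPAShared *first_shared, bool svq)
{
    v->index = queue_pair_index;
    if (queue_pair_index == 0) {
        v->shared = g_new0(VhostVDPAShared, 1);
        v->shared->shadow_data = svq;
    } else {
        v->shared = first_shared;   /* borrowed from queue pair 0 */
    }
}

Because every queue pair holds a pointer to the same VhostVDPAShared, a
flag such as shadow_data written by one client is visible to all of them,
regardless of which device ends up registering the memory listener.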
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
---
v1 from RFC:
* Fix vhost_vdpa_net_cvq_start checking always_svq instead of
shadow_data. That check could leave CVQ unshadowed if
vhost_vdpa_net_cvq_start was called in the middle of a migration
(sketched after this changelog).
v2:
* Avoid repeatedly setting shared->shadow_data by squashing Si-Wei's
patch [1]
[1]
https://patchwork.kernel.org/project/qemu-devel/patch/1701970793-6865-10-git-send-email-si-wei.liu@oracle.com/
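To make the v1 fix concrete, a hedged sketch of the two predicates
(illustrative helper names, not QEMU API; the actual fix is the
v->shadow_vqs_enabled = v->shared->shadow_data assignment in the
net/vhost-vdpa.c hunk below):

#include <stdbool.h>

typedef struct VhostVDPAShared { bool shadow_data; } VhostVDPAShared;

/* v1 (wrong): x-svq=on is not the only reason the data queues are
 * shadowed, so CVQ could stay unshadowed during a migration. */
static bool cvq_needs_shadow_v1(bool always_svq)
{
    return always_svq;
}

/* fixed: shadow_data is set by the first data queue when always_svq is
 * on OR a migration is in flight, so CVQ follows the shared flag. */
static bool cvq_needs_shadow(const VhostVDPAShared *shared)
{
    return shared->shadow_data;
}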
---
 include/hw/virtio/vhost-vdpa.h |  5 +++--
 hw/virtio/vhost-vdpa.c         |  6 +++---
 net/vhost-vdpa.c               | 22 +++++-----------------
 3 files changed, 11 insertions(+), 22 deletions(-)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 8d52a7e498..01e0f25e27 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -36,6 +36,9 @@ typedef struct vhost_vdpa_shared {
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
+
+ /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
+ bool shadow_data;
} VhostVDPAShared;
typedef struct vhost_vdpa {
@@ -47,8 +50,6 @@ typedef struct vhost_vdpa {
MemoryListener listener;
uint64_t acked_features;
bool shadow_vqs_enabled;
- /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
- bool shadow_data;
/* Device suspended successfully */
bool suspended;
VhostVDPAShared *shared;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 2bceadd118..ec028e4c56 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -353,7 +353,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
int r;
mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
@@ -380,7 +380,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
return;
fail_map:
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
vhost_iova_tree_remove(v->shared->iova_tree, mem_region);
}
@@ -435,7 +435,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_data) {
+ if (v->shared->shadow_data) {
const DMAMap *result;
const void *vaddr = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region +
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 7be2c30ad3..bf8e8327da 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -290,15 +290,6 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
return size;
}
-/** From any vdpa net client, get the netclient of the first queue pair */
-static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
-{
- NICState *nic = qemu_get_nic(s->nc.peer);
- NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
-
- return DO_UPCAST(VhostVDPAState, nc, nc0);
-}
-
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
struct vhost_vdpa *v = &s->vhost_vdpa;
@@ -369,13 +360,12 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
if (s->always_svq ||
migration_is_setup_or_active(migrate_get_current()->state)) {
v->shadow_vqs_enabled = true;
- v->shadow_data = true;
} else {
v->shadow_vqs_enabled = false;
- v->shadow_data = false;
}
if (v->index == 0) {
+ v->shared->shadow_data = v->shadow_vqs_enabled;
vhost_vdpa_net_data_start_first(s);
return 0;
}
@@ -523,7 +513,7 @@ dma_map_err:
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
- VhostVDPAState *s, *s0;
+ VhostVDPAState *s;
struct vhost_vdpa *v;
int64_t cvq_group;
int r;
@@ -534,12 +524,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
s = DO_UPCAST(VhostVDPAState, nc, nc);
v = &s->vhost_vdpa;
- s0 = vhost_vdpa_net_first_nc_vdpa(s);
- v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
- v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
+ v->shadow_vqs_enabled = v->shared->shadow_data;
s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
- if (s->vhost_vdpa.shadow_data) {
+ if (v->shared->shadow_data) {
/* SVQ is already configured for all virtqueues */
goto out;
}
@@ -1688,12 +1676,12 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->always_svq = svq;
s->migration_state.notify = NULL;
s->vhost_vdpa.shadow_vqs_enabled = svq;
- s->vhost_vdpa.shadow_data = svq;
if (queue_pair_index == 0) {
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
s->vhost_vdpa.shared->iova_range = iova_range;
+ s->vhost_vdpa.shared->shadow_data = svq;
} else if (!is_datapath) {
s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
PROT_READ | PROT_WRITE,
--
2.39.3