[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 08/14] vdpa: add vdpa net migration state notifier
From: |
Eugenio Pérez |
Subject: |
[PATCH v3 08/14] vdpa: add vdpa net migration state notifier |
Date: |
Wed, 15 Feb 2023 18:38:44 +0100 |
This allows net to restart the device backend to configure SVQ on it.
Ideally, these changes should not be net specific. However, the vdpa net
backend is the one with enough knowledge to configure everything, for a
few reasons:
* Queues might need to be shadowed or not depending on their kind (control
vs data).
* Queues need to share the same map translations (iova tree).
Because of that it is cleaner to restart the whole net backend and
configure again as expected, similar to how vhost-kernel moves between
userspace and passthrough.
If more kinds of devices need dynamic switching to SVQ we can create a
callback struct like VhostOps and move most of the code there.
VhostOps cannot be reused since all vdpa backends share them, and
specializing them just for networking would be too heavy.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v3:
* Check for migration state at vdpa device start to enable SVQ in data
vqs.
v1 from RFC:
* Add TODO to use the resume operation in the future.
* Use migration_in_setup and migration_has_failed instead of a
complicated switch case.
---
net/vhost-vdpa.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 86 insertions(+)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index dd686b4514..cf9830bb02 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -26,12 +26,15 @@
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
+#include "migration/migration.h"
+#include "migration/misc.h"
#include "hw/virtio/vhost.h"
/* Todo:need to add the multiqueue support here */
typedef struct VhostVDPAState {
NetClientState nc;
struct vhost_vdpa vhost_vdpa;
+ Notifier migration_state;
VHostNetState *vhost_net;
/* Control commands shadow buffers */
@@ -241,10 +244,79 @@ static VhostVDPAState
*vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
return DO_UPCAST(VhostVDPAState, nc, nc0);
}
+static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
+{
+ struct vhost_vdpa *v = &s->vhost_vdpa;
+ VirtIONet *n;
+ VirtIODevice *vdev;
+ int data_queue_pairs, cvq, r;
+ NetClientState *peer;
+
+ /* We are only called on the first data vqs and only if x-svq is not set */
+ if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
+ return;
+ }
+
+ vdev = v->dev->vdev;
+ n = VIRTIO_NET(vdev);
+ if (!n->vhost_started) {
+ return;
+ }
+
+ data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
+ cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
+ n->max_ncs - n->max_queue_pairs : 0;
+ /*
+ * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
+ * in the future and resume the device if read-only operations between
+ * suspend and reset goes wrong.
+ */
+ vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
+
+ peer = s->nc.peer;
+ for (int i = 0; i < data_queue_pairs + cvq; i++) {
+ VhostVDPAState *vdpa_state;
+ NetClientState *nc;
+
+ if (i < data_queue_pairs) {
+ nc = qemu_get_peer(peer, i);
+ } else {
+ nc = qemu_get_peer(peer, n->max_queue_pairs);
+ }
+
+ vdpa_state = DO_UPCAST(VhostVDPAState, nc, nc);
+ vdpa_state->vhost_vdpa.shadow_data = enable;
+
+ if (i < data_queue_pairs) {
+ /* Do not override CVQ shadow_vqs_enabled */
+ vdpa_state->vhost_vdpa.shadow_vqs_enabled = enable;
+ }
+ }
+
+ r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
+ if (unlikely(r < 0)) {
+ error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
+ }
+}
+
+static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
+{
+ MigrationState *migration = data;
+ VhostVDPAState *s = container_of(notifier, VhostVDPAState,
+ migration_state);
+
+ if (migration_in_setup(migration)) {
+ vhost_vdpa_net_log_global_enable(s, true);
+ } else if (migration_has_failed(migration)) {
+ vhost_vdpa_net_log_global_enable(s, false);
+ }
+}
+
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
struct vhost_vdpa *v = &s->vhost_vdpa;
+ add_migration_state_change_notifier(&s->migration_state);
if (v->shadow_vqs_enabled) {
v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
v->iova_range.last);
@@ -258,6 +330,15 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+ if (s->always_svq ||
+ migration_is_setup_or_active(migrate_get_current()->state)) {
+ v->shadow_vqs_enabled = true;
+ v->shadow_data = true;
+ } else {
+ v->shadow_vqs_enabled = false;
+ v->shadow_data = false;
+ }
+
if (v->index == 0) {
vhost_vdpa_net_data_start_first(s);
return 0;
@@ -278,6 +359,10 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc)
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+ if (s->vhost_vdpa.index == 0) {
+ remove_migration_state_change_notifier(&s->migration_state);
+ }
+
dev = s->vhost_vdpa.dev;
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
@@ -741,6 +826,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState
*peer,
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->always_svq = svq;
+ s->migration_state.notify = vdpa_net_migration_state_notifier;
s->vhost_vdpa.shadow_vqs_enabled = svq;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.shadow_data = svq;
--
2.31.1
- [PATCH v3 00/14] Dynamically switch to vhost shadow virtqueues at vdpa net migration, Eugenio Pérez, 2023/02/15
- [PATCH v3 01/14] vdpa net: move iova tree creation from init to start, Eugenio Pérez, 2023/02/15
- [PATCH v3 02/14] vdpa: stop svq at vhost_vdpa_dev_start(false), Eugenio Pérez, 2023/02/15
- [PATCH v3 03/14] vdpa: Negotiate _F_SUSPEND feature, Eugenio Pérez, 2023/02/15
- [PATCH v3 04/14] vdpa: add vhost_vdpa_suspend, Eugenio Pérez, 2023/02/15
- [PATCH v3 05/14] vdpa: move vhost reset after get vring base, Eugenio Pérez, 2023/02/15
- [PATCH v3 06/14] vdpa: rewind at get_base, not set_base, Eugenio Pérez, 2023/02/15
- [PATCH v3 07/14] vdpa net: allow VHOST_F_LOG_ALL, Eugenio Pérez, 2023/02/15
- [PATCH v3 08/14] vdpa: add vdpa net migration state notifier,
Eugenio Pérez <=
- [PATCH v3 10/14] vdpa net: block migration if the device has CVQ, Eugenio Pérez, 2023/02/15
- [PATCH v3 11/14] vdpa: block migration if device has unsupported features, Eugenio Pérez, 2023/02/15
- [PATCH v3 12/14] vdpa: block migration if dev does not have _F_SUSPEND, Eugenio Pérez, 2023/02/15
- [PATCH v3 13/14] vdpa: block migration if SVQ does not admit a feature, Eugenio Pérez, 2023/02/15
- [PATCH v3 14/14] vdpa: return VHOST_F_LOG_ALL in vhost-vdpa devices, Eugenio Pérez, 2023/02/15
- [PATCH v3 09/14] vdpa: disable RAM block discard only for the first device, Eugenio Pérez, 2023/02/15