[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 01/20] migration: Pass threshold_size to .state_pending_{estimate, exact}()
From: |
Avihai Horon |
Subject: |
[PATCH v2 01/20] migration: Pass threshold_size to .state_pending_{estimate, exact}() |
Date: |
Wed, 22 Feb 2023 19:48:56 +0200 |
Pass threshold_size to .state_pending_{estimate,exact}().
This parameter will be used in the following patch by VFIO migration to
force the complete transmission of all VFIO pre-copy initial bytes prior
to moving to the stop-copy phase, which can reduce migration downtime.
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
---
include/migration/register.h | 7 ++++---
migration/savevm.h | 6 ++++--
hw/s390x/s390-stattrib.c | 4 ++--
hw/vfio/migration.c | 3 ++-
migration/block-dirty-bitmap.c | 2 +-
migration/block.c | 4 ++--
migration/migration.c | 12 ++++++++----
migration/ram.c | 6 ++++--
migration/savevm.c | 12 ++++++++----
migration/trace-events | 4 ++--
10 files changed, 37 insertions(+), 23 deletions(-)
diff --git a/include/migration/register.h b/include/migration/register.h
index a8dfd8fefd..85d22931a7 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -61,11 +61,12 @@ typedef struct SaveVMHandlers {
* pending data.
*/
/* This estimates the remaining data to transfer */
- void (*state_pending_estimate)(void *opaque, uint64_t *must_precopy,
+ void (*state_pending_estimate)(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
/* This calculate the exact remaining data to transfer */
- void (*state_pending_exact)(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy);
+ void (*state_pending_exact)(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t
*can_postcopy);
LoadStateHandler *load_state;
int (*load_setup)(QEMUFile *f, void *opaque);
int (*load_cleanup)(void *opaque);
diff --git a/migration/savevm.h b/migration/savevm.h
index fb636735f0..c94d31f051 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -40,9 +40,11 @@ void qemu_savevm_state_cleanup(void);
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
bool inactivate_disks);
-void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+void qemu_savevm_state_pending_exact(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
-void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+void qemu_savevm_state_pending_estimate(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy);
void qemu_savevm_send_ping(QEMUFile *f, uint32_t value);
void qemu_savevm_send_open_return_path(QEMUFile *f);
diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c
index aed919ad7d..f1d4064c09 100644
--- a/hw/s390x/s390-stattrib.c
+++ b/hw/s390x/s390-stattrib.c
@@ -182,8 +182,8 @@ static int cmma_save_setup(QEMUFile *f, void *opaque)
return 0;
}
-static void cmma_state_pending(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy)
+static void cmma_state_pending(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t *can_postcopy)
{
S390StAttribState *sas = S390_STATTRIB(opaque);
S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas);
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index a2c3d9bade..4fb7d01532 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -314,7 +314,8 @@ static void vfio_save_cleanup(void *opaque)
* repeatedly while pending RAM size is over the threshold, thus migration
* can't converge and querying the VFIO device pending data size is useless.
*/
-static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy,
+static void vfio_state_pending_exact(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
VFIODevice *vbasedev = opaque;
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index fe73aa94b1..4fe0b83bc8 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -762,7 +762,7 @@ static int dirty_bitmap_save_complete(QEMUFile *f, void
*opaque)
return 0;
}
-static void dirty_bitmap_state_pending(void *opaque,
+static void dirty_bitmap_state_pending(void *opaque, uint64_t threshold_size,
uint64_t *must_precopy,
uint64_t *can_postcopy)
{
diff --git a/migration/block.c b/migration/block.c
index 426a25bb19..70438a299c 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -853,8 +853,8 @@ static int block_save_complete(QEMUFile *f, void *opaque)
return 0;
}
-static void block_state_pending(void *opaque, uint64_t *must_precopy,
- uint64_t *can_postcopy)
+static void block_state_pending(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy, uint64_t *can_postcopy)
{
/* Estimate pending number of bytes to send */
uint64_t pending;
diff --git a/migration/migration.c b/migration/migration.c
index ae2025d9d8..a0777d9848 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3866,15 +3866,19 @@ static MigIterateState
migration_iteration_run(MigrationState *s)
uint64_t must_precopy, can_postcopy;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
- qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
+ qemu_savevm_state_pending_estimate(s->threshold_size, &must_precopy,
+ &can_postcopy);
uint64_t pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
+ trace_migrate_pending_estimate(pending_size, s->threshold_size,
+ must_precopy, can_postcopy);
if (must_precopy <= s->threshold_size) {
- qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
+ qemu_savevm_state_pending_exact(s->threshold_size, &must_precopy,
+ &can_postcopy);
pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
+ trace_migrate_pending_exact(pending_size, s->threshold_size,
+ must_precopy, can_postcopy);
}
if (!pending_size || pending_size < s->threshold_size) {
diff --git a/migration/ram.c b/migration/ram.c
index 96e8a19a58..514a18b5d7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3489,7 +3489,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
return 0;
}
-static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
+static void ram_state_pending_estimate(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
RAMState **temp = opaque;
@@ -3505,7 +3506,8 @@ static void ram_state_pending_estimate(void *opaque,
uint64_t *must_precopy,
}
}
-static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
+static void ram_state_pending_exact(void *opaque, uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
RAMState **temp = opaque;
diff --git a/migration/savevm.c b/migration/savevm.c
index aa54a67fda..a642c0dd5a 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1541,7 +1541,8 @@ flush:
* the result is split into the amount for units that can and
* for units that can't do postcopy.
*/
-void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
+void qemu_savevm_state_pending_estimate(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
SaveStateEntry *se;
@@ -1558,11 +1559,13 @@ void qemu_savevm_state_pending_estimate(uint64_t
*must_precopy,
continue;
}
}
- se->ops->state_pending_estimate(se->opaque, must_precopy,
can_postcopy);
+ se->ops->state_pending_estimate(se->opaque, threshold_size,
+ must_precopy, can_postcopy);
}
}
-void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
+void qemu_savevm_state_pending_exact(uint64_t threshold_size,
+ uint64_t *must_precopy,
uint64_t *can_postcopy)
{
SaveStateEntry *se;
@@ -1579,7 +1582,8 @@ void qemu_savevm_state_pending_exact(uint64_t
*must_precopy,
continue;
}
}
- se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy);
+ se->ops->state_pending_exact(se->opaque, threshold_size, must_precopy,
+ can_postcopy);
}
}
diff --git a/migration/trace-events b/migration/trace-events
index 92161eeac5..b23c044f5e 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -150,8 +150,8 @@ migrate_fd_cleanup(void) ""
migrate_fd_error(const char *error_desc) "error=%s"
migrate_fd_cancel(void) ""
migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in
%s at 0x%zx len 0x%zx"
-migrate_pending_exact(uint64_t size, uint64_t pre, uint64_t post) "exact
pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
-migrate_pending_estimate(uint64_t size, uint64_t pre, uint64_t post) "estimate
pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")"
+migrate_pending_exact(uint64_t size, uint64_t threshold_size, uint64_t pre,
uint64_t post) "exact pending size %" PRIu64 " threshold size %" PRIu64 " (pre
= %" PRIu64 " post=%" PRIu64 ")"
+migrate_pending_estimate(uint64_t size, uint64_t threshold_size, uint64_t pre,
uint64_t post) "estimate pending size %" PRIu64 " threshold size %" PRIu64 "
(pre = %" PRIu64 " post=%" PRIu64 ")"
migrate_send_rp_message(int msg_type, uint16_t len) "%d: len %d"
migrate_send_rp_recv_bitmap(char *name, int64_t size) "block '%s' size
0x%"PRIi64
migration_completion_file_err(void) ""
--
2.26.3
- [PATCH v2 00/20] vfio: Add migration pre-copy support and device dirty tracking, Avihai Horon, 2023/02/22
- [PATCH v2 01/20] migration: Pass threshold_size to .state_pending_{estimate, exact}(),
Avihai Horon <=
- [PATCH v2 02/20] vfio/migration: Refactor vfio_save_block() to return saved data size, Avihai Horon, 2023/02/22
- [PATCH v2 04/20] vfio/common: Fix error reporting in vfio_get_dirty_bitmap(), Avihai Horon, 2023/02/22
- [PATCH v2 07/20] vfio/common: Add VFIOBitmap and (de)alloc functions, Avihai Horon, 2023/02/22
- [PATCH v2 09/20] util: Extend iova_tree_foreach() to take data argument, Avihai Horon, 2023/02/22
- [PATCH v2 05/20] vfio/common: Fix wrong %m usages, Avihai Horon, 2023/02/22
- [PATCH v2 06/20] vfio/common: Abort migration if dirty log start/stop/sync fails, Avihai Horon, 2023/02/22