[PATCH v2] migration: refactor migration_completion
From: Wei Wang
Subject: [PATCH v2] migration: refactor migration_completion
Date: Fri, 4 Aug 2023 17:30:53 +0800
The current migration_completion function is a bit long. Refactor its
implementation into subfunctions:
- migration_completion_precopy: completion code related to precopy
- migration_completion_postcopy: completion code related to postcopy
- close_return_path_on_source: rp thread cleanup on migration
completion. It is named to match open_return_path_on_source.
This improves readability and makes future updates easier (e.g. adding
new subfunctions when completion code for new features is needed). No
functional changes intended.
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
Changelog:
- Merge await_return_path_close_on_source into
close_return_path_on_source, as the latter basically just calls the
former;
- Make migration_completion_postcopy "void" as it doesn't return a
value.
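
For readers skimming the diff below, here is a condensed, standalone model
of the resulting control flow. This is not QEMU code: the types are stubs
and all names are illustrative, but it compiles on its own and shows how
migration_completion now dispatches by state and funnels every error into
one shared failure handler.

/*
 * Standalone sketch (not QEMU code) of the refactored control flow,
 * with stub types and made-up names.
 */
#include <stdio.h>

enum mig_state { MIG_ACTIVE, MIG_POSTCOPY_ACTIVE, MIG_OTHER };

struct mig_ctx {
    enum mig_state state;
};

/* Stop the VM, maybe pause, save device state; returns 0 on success. */
static int completion_precopy(struct mig_ctx *s, int *current_active_state)
{
    (void)s;
    (void)current_active_state;
    return 0;
}

/* Flush remaining postcopy state; has no failure mode to report. */
static void completion_postcopy(struct mig_ctx *s)
{
    (void)s;
}

/* Common failure path: recover resources, then mark the state FAILED. */
static void completion_failed(struct mig_ctx *s, int current_active_state)
{
    (void)s;
    fprintf(stderr, "completion failed (from state %d)\n",
            current_active_state);
}

static void completion(struct mig_ctx *s)
{
    int ret = 0;
    int current_active_state = (int)s->state;

    if (s->state == MIG_ACTIVE) {
        ret = completion_precopy(s, &current_active_state);
    } else if (s->state == MIG_POSTCOPY_ACTIVE) {
        completion_postcopy(s);
    } else {
        ret = -1; /* unexpected state */
    }

    if (ret < 0) {
        completion_failed(s, current_active_state);
    }
}

int main(void)
{
    struct mig_ctx s = { .state = MIG_ACTIVE };
    completion(&s);
    return 0;
}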
migration/migration.c | 174 ++++++++++++++++++++++++------------------
1 file changed, 98 insertions(+), 76 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 5528acb65e..f1c55d1148 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2048,9 +2048,13 @@ static int open_return_path_on_source(MigrationState *ms,
return 0;
}
-/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
-static int await_return_path_close_on_source(MigrationState *ms)
+static int close_return_path_on_source(MigrationState *ms)
{
+ if (!ms->rp_state.rp_thread_created) {
+ return 0;
+ }
+ trace_migration_return_path_end_before();
+
/*
* If this is a normal exit then the destination will send a SHUT and the
* rp_thread will exit, however if there's an error we need to cause
@@ -2068,6 +2072,8 @@ static int await_return_path_close_on_source(MigrationState *ms)
qemu_thread_join(&ms->rp_state.rp_thread);
ms->rp_state.rp_thread_created = false;
trace_await_return_path_close_on_source_close();
+
+ trace_migration_return_path_end_after(ms->rp_state.error);
return ms->rp_state.error;
}
@@ -2301,66 +2307,107 @@ static int migration_maybe_pause(MigrationState *s,
return s->state == new_state ? 0 : -EINVAL;
}
-/**
- * migration_completion: Used by migration_thread when there's not much left.
- * The caller 'breaks' the loop when this returns.
- *
- * @s: Current migration state
- */
-static void migration_completion(MigrationState *s)
+static int migration_completion_precopy(MigrationState *s,
+ int *current_active_state)
{
int ret;
- int current_active_state = s->state;
- if (s->state == MIGRATION_STATUS_ACTIVE) {
- qemu_mutex_lock_iothread();
- s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
+ qemu_mutex_lock_iothread();
+ s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
- s->vm_old_state = runstate_get();
- global_state_store();
+ s->vm_old_state = runstate_get();
+ global_state_store();
- ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
- trace_migration_completion_vm_stop(ret);
- if (ret >= 0) {
- ret = migration_maybe_pause(s, &current_active_state,
- MIGRATION_STATUS_DEVICE);
- }
- if (ret >= 0) {
- /*
- * Inactivate disks except in COLO, and track that we
- * have done so in order to remember to reactivate
- * them if migration fails or is cancelled.
- */
- s->block_inactive = !migrate_colo();
- migration_rate_set(RATE_LIMIT_DISABLED);
- ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
- s->block_inactive);
- }
+ ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+ trace_migration_completion_vm_stop(ret);
+ if (ret < 0) {
+ goto out_unlock;
+ }
- qemu_mutex_unlock_iothread();
+ ret = migration_maybe_pause(s, current_active_state,
+ MIGRATION_STATUS_DEVICE);
+ if (ret < 0) {
+ goto out_unlock;
+ }
- if (ret < 0) {
- goto fail;
- }
- } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
- trace_migration_completion_postcopy_end();
+ /*
+ * Inactivate disks except in COLO, and track that we have done so in order
+ * to remember to reactivate them if migration fails or is cancelled.
+ */
+ s->block_inactive = !migrate_colo();
+ migration_rate_set(RATE_LIMIT_DISABLED);
+ ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
+ s->block_inactive);
+out_unlock:
+ qemu_mutex_unlock_iothread();
+ return ret;
+}
- qemu_mutex_lock_iothread();
- qemu_savevm_state_complete_postcopy(s->to_dst_file);
- qemu_mutex_unlock_iothread();
+static void migration_completion_postcopy(MigrationState *s)
+{
+ trace_migration_completion_postcopy_end();
+ qemu_mutex_lock_iothread();
+ qemu_savevm_state_complete_postcopy(s->to_dst_file);
+ qemu_mutex_unlock_iothread();
+
+ /*
+ * Shutdown the postcopy fast path thread. This is only needed when dest
+ * QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need this.
+ */
+ if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
+ postcopy_preempt_shutdown_file(s);
+ }
+
+ trace_migration_completion_postcopy_end_after_complete();
+}
+
+static void migration_completion_failed(MigrationState *s,
+ int current_active_state)
+{
+ if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
+ s->state == MIGRATION_STATUS_DEVICE)) {
/*
- * Shutdown the postcopy fast path thread. This is only needed
- * when dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need
- * this.
+ * If not doing postcopy, vm_start() will be called: let's
+ * regain control on images.
*/
- if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
- postcopy_preempt_shutdown_file(s);
+ Error *local_err = NULL;
+
+ qemu_mutex_lock_iothread();
+ bdrv_activate_all(&local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ } else {
+ s->block_inactive = false;
}
+ qemu_mutex_unlock_iothread();
+ }
- trace_migration_completion_postcopy_end_after_complete();
+ migrate_set_state(&s->state, current_active_state,
+ MIGRATION_STATUS_FAILED);
+}
+
+/**
+ * migration_completion: Used by migration_thread when there's not much left.
+ * The caller 'breaks' the loop when this returns.
+ *
+ * @s: Current migration state
+ */
+static void migration_completion(MigrationState *s)
+{
+ int ret = 0;
+ int current_active_state = s->state;
+
+ if (s->state == MIGRATION_STATUS_ACTIVE) {
+ ret = migration_completion_precopy(s, &current_active_state);
+ } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
+ migration_completion_postcopy(s);
} else {
+ ret = -1;
+ }
+
+ if (ret < 0) {
goto fail;
}
@@ -2370,14 +2417,8 @@ static void migration_completion(MigrationState *s)
* it will wait for the destination to send it's status in
* a SHUT command).
*/
- if (s->rp_state.rp_thread_created) {
- int rp_error;
- trace_migration_return_path_end_before();
- rp_error = await_return_path_close_on_source(s);
- trace_migration_return_path_end_after(rp_error);
- if (rp_error) {
- goto fail;
- }
+ if (close_return_path_on_source(s) < 0) {
+ goto fail;
}
if (qemu_file_get_error(s->to_dst_file)) {
@@ -2397,26 +2438,7 @@ static void migration_completion(MigrationState *s)
return;
fail:
- if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
- s->state == MIGRATION_STATUS_DEVICE)) {
- /*
- * If not doing postcopy, vm_start() will be called: let's
- * regain control on images.
- */
- Error *local_err = NULL;
-
- qemu_mutex_lock_iothread();
- bdrv_activate_all(&local_err);
- if (local_err) {
- error_report_err(local_err);
- } else {
- s->block_inactive = false;
- }
- qemu_mutex_unlock_iothread();
- }
-
- migrate_set_state(&s->state, current_active_state,
- MIGRATION_STATUS_FAILED);
+ migration_completion_failed(s, current_active_state);
}
/**
--
2.27.0
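
A side note on the error handling in migration_completion_precopy above:
the function takes the iothread lock once and routes every early failure
through a single out_unlock label, so the lock is released on exactly one
path. A minimal standalone illustration of that idiom follows, using plain
pthreads rather than QEMU's iothread lock; all names here are made up.

/*
 * Illustrative only: single-unlock error path, as used by
 * migration_completion_precopy() in the patch above.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int step(int fail)
{
    return fail ? -1 : 0;
}

static int do_completion(int fail_at)
{
    int ret;

    pthread_mutex_lock(&lock);

    ret = step(fail_at == 1);   /* think vm_stop_force_state() */
    if (ret < 0) {
        goto out_unlock;
    }

    ret = step(fail_at == 2);   /* think migration_maybe_pause() */
    if (ret < 0) {
        goto out_unlock;
    }

    ret = step(fail_at == 3);   /* think qemu_savevm_state_complete_precopy() */

out_unlock:
    pthread_mutex_unlock(&lock); /* one unlock covers success and all errors */
    return ret;
}

int main(void)
{
    printf("ret = %d\n", do_completion(2));
    return 0;
}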