qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v7 29/42] Postcopy end in migration_thread


From: Juan Quintela
Subject: Re: [Qemu-devel] [PATCH v7 29/42] Postcopy end in migration_thread
Date: Mon, 13 Jul 2015 15:15:07 +0200
User-agent: Gnus/5.13 (Gnus v5.13) Emacs/24.5 (gnu/linux)

"Dr. David Alan Gilbert (git)" <address@hidden> wrote:
> From: "Dr. David Alan Gilbert" <address@hidden>
>
> The end of migration in postcopy is a bit different since some of
> the things normally done at the end of migration have already been
> done on the transition to postcopy.
>
> The end of migration code is getting a bit complicated now, so
> move out into its own function.
>
> Signed-off-by: Dr. David Alan Gilbert <address@hidden>

I think that I would split the function and then add the postcopy code.

BTW, it is a local function, we can use shorter names:

migration_completion()?

trace names specifically get hugggggggggge.


> +static void migration_thread_end_of_iteration(MigrationState *s,
> +                                              int current_active_state,

RunState?
And it is not needed as parameter.


> +                                              bool *old_vm_running,
> +                                              int64_t *start_time)
> +{
> +    int ret;
> +    if (s->state == MIGRATION_STATUS_ACTIVE) {
           current_active_state = s->state;
> +        qemu_mutex_lock_iothread();
> +        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
> +        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
> +        *old_vm_running = runstate_is_running();
> +
> +        ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
> +        if (ret >= 0) {
> +            qemu_file_set_rate_limit(s->file, INT64_MAX);
> +            qemu_savevm_state_complete_precopy(s->file);
> +        }
> +        qemu_mutex_unlock_iothread();
> +
> +        if (ret < 0) {
> +            goto fail;
> +        }
> +    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
           current_active_state = s->state;
> +        trace_migration_thread_end_of_iteration_postcopy_end();
> +
> +        qemu_savevm_state_complete_postcopy(s->file);
> +        
> trace_migration_thread_end_of_iteration_postcopy_end_after_complete();
> +    }
> +
> +    /*
> +     * If rp was opened we must clean up the thread before
> +     * cleaning everything else up (since if there are no failures
> +     * it will wait for the destination to send its status in
> +     * a SHUT command).
> +     * Postcopy opens rp if enabled (even if it's not activated)
> +     */
> +    if (migrate_postcopy_ram()) {
> +        int rp_error;
> +        trace_migration_thread_end_of_iteration_postcopy_end_before_rp();
> +        rp_error = await_return_path_close_on_source(s);
> +        
> trace_migration_thread_end_of_iteration_postcopy_end_after_rp(rp_error);
> +        if (rp_error) {
> +            goto fail;
> +        }
> +    }
> +
> +    if (qemu_file_get_error(s->file)) {
> +        trace_migration_thread_end_of_iteration_file_err();
> +        goto fail;
> +    }
> +
> +    migrate_set_state(s, current_active_state, MIGRATION_STATUS_COMPLETED);
> +    return;
> +
> +fail:
> +    migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED);
> +}
> +
> +/*
>   * Master migration thread on the source VM.
>   * It drives the migration and pumps the data down the outgoing channel.
>   */
> @@ -1233,31 +1294,11 @@ static void *migration_thread(void *opaque)
>                  /* Just another iteration step */
>                  qemu_savevm_state_iterate(s->file);
>              } else {
> -                int ret;
> -
> -                qemu_mutex_lock_iothread();
> -                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
> -                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
> -                old_vm_running = runstate_is_running();
> -
> -                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
> -                if (ret >= 0) {
> -                    qemu_file_set_rate_limit(s->file, INT64_MAX);
> -                    qemu_savevm_state_complete_precopy(s->file);
> -                }
> -                qemu_mutex_unlock_iothread();
> +                trace_migration_thread_low_pending(pending_size);
>  
> -                if (ret < 0) {
> -                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
> -                                      MIGRATION_STATUS_FAILED);
> -                    break;
> -                }
> -
> -                if (!qemu_file_get_error(s->file)) {
> -                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
> -                                      MIGRATION_STATUS_COMPLETED);
> -                    break;
> -                }
> +                migration_thread_end_of_iteration(s, current_active_type,
> +                    &old_vm_running, &start_time);
> +                break;
>              }
>          }
>  
> diff --git a/trace-events b/trace-events
> index f096877..528d5a3 100644
> --- a/trace-events
> +++ b/trace-events
> @@ -1425,6 +1425,12 @@ migrate_send_rp_message(int msg_type, uint16_t len) 
> "%d: len %d"
>  migration_thread_after_loop(void) ""
>  migration_thread_file_err(void) ""
>  migration_thread_setup_complete(void) ""
> +migration_thread_low_pending(uint64_t pending) "%" PRIu64
> +migration_thread_end_of_iteration_file_err(void) ""
> +migration_thread_end_of_iteration_postcopy_end(void) ""
> +migration_thread_end_of_iteration_postcopy_end_after_complete(void) ""
> +migration_thread_end_of_iteration_postcopy_end_before_rp(void) ""
> +migration_thread_end_of_iteration_postcopy_end_after_rp(int rp_error) "%d"
>  open_return_path_on_source(void) ""
>  open_return_path_on_source_continue(void) ""
>  postcopy_start(void) ""



reply via email to

[Prev in Thread] Current Thread [Next in Thread]