qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 13/41] migration: prepare to access s->state outside critical sections


From: Orit Wasserman
Subject: Re: [Qemu-devel] [PATCH 13/41] migration: prepare to access s->state outside critical sections
Date: Mon, 18 Feb 2013 12:48:21 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130110 Thunderbird/17.0.2

On 02/15/2013 07:46 PM, Paolo Bonzini wrote:
> Accessing s->state outside the big QEMU lock will simplify a bit the
> locking/unlocking of the iothread lock.
> 
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  migration.c |   21 ++++++++++++---------
>  1 files changed, 12 insertions(+), 9 deletions(-)
> 
> diff --git a/migration.c b/migration.c
> index b0b5578..a7f619b 100644
> --- a/migration.c
> +++ b/migration.c
> @@ -281,14 +281,14 @@ static void migrate_fd_cleanup(MigrationState *s)
>  void migrate_fd_error(MigrationState *s)
>  {
>      DPRINTF("setting error state\n");
> -    s->state = MIG_STATE_ERROR;
> +    __sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE, 
> MIG_STATE_ERROR);
>      migrate_fd_cleanup(s);
>  }
>  
>  static void migrate_fd_completed(MigrationState *s)
>  {
>      DPRINTF("setting completed state\n");
> -    s->state = MIG_STATE_COMPLETED;
> +    __sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE, 
> MIG_STATE_COMPLETED);
>      migrate_fd_cleanup(s);
>  }
>  
> @@ -313,12 +313,9 @@ static ssize_t migrate_fd_put_buffer(MigrationState *s, 
> const void *data,
>  
>  static void migrate_fd_cancel(MigrationState *s)
>  {
> -    if (s->state != MIG_STATE_ACTIVE)
> -        return;
> -
>      DPRINTF("cancelling migration\n");
>  
> -    s->state = MIG_STATE_CANCELLED;
> +    __sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE, 
> MIG_STATE_CANCELLED);
>      migrate_fd_cleanup(s);
>  }
>  
> @@ -651,12 +648,14 @@ static void *buffered_file_thread(void *opaque)
>      qemu_mutex_lock_iothread();
>      DPRINTF("beginning savevm\n");
>      qemu_savevm_state_begin(s->file, &s->params);
> +    qemu_mutex_unlock_iothread();
>  
>      while (s->state == MIG_STATE_ACTIVE) {
>          int64_t current_time = qemu_get_clock_ms(rt_clock);
>          uint64_t pending_size;
>  
>          if (s->bytes_xfer < s->xfer_limit) {
> +            qemu_mutex_lock_iothread();
>              DPRINTF("iterate\n");
>              pending_size = qemu_savevm_state_pending(s->file, max_size);
>              DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
> @@ -672,8 +671,8 @@ static void *buffered_file_thread(void *opaque)
>                  qemu_savevm_state_complete(s->file);
>                  last_round = true;
>              }
> +            qemu_mutex_unlock_iothread();
>          }
> -        qemu_mutex_unlock_iothread();
>          if (current_time >= initial_time + BUFFER_DELAY) {
>              uint64_t transferred_bytes = s->bytes_xfer;
>              uint64_t time_spent = current_time - initial_time;
> @@ -692,14 +691,18 @@ static void *buffered_file_thread(void *opaque)
>              g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
>          }
>          buffered_flush(s);
> -        qemu_mutex_lock_iothread();
>          if (qemu_file_get_error(s->file)) {
> +            qemu_mutex_lock_iothread();
>              migrate_fd_error(s);
> +            qemu_mutex_unlock_iothread();
>          } else if (last_round && s->buffer_size == 0) {
> +            qemu_mutex_lock_iothread();
>              migrate_fd_completed(s);
> +            qemu_mutex_unlock_iothread();
>          }
>      }
>  
> +    qemu_mutex_lock_iothread();
>      if (s->state == MIG_STATE_COMPLETED) {
>          int64_t end_time = qemu_get_clock_ms(rt_clock);
>          s->total_time = end_time - s->total_time;
> @@ -711,8 +714,8 @@ static void *buffered_file_thread(void *opaque)
>              vm_start();
>          }
>      }
> -
>      qemu_mutex_unlock_iothread();
> +
>      g_free(s->buffer);
>      return NULL;
>  }
> 

Reviewed-by: Orit Wasserman <address@hidden>




reply via email to

[Prev in Thread] Current Thread [Next in Thread]