qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 2/2] migration: fix deadlock


From: Wen Congyang
Subject: Re: [Qemu-devel] [PATCH 2/2] migration: fix deadlock
Date: Fri, 25 Sep 2015 17:35:46 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.2.0

On 09/25/2015 05:09 PM, Denis V. Lunev wrote:
> Release qemu global mutex before call synchronize_rcu().
> synchronize_rcu() waits for all readers to finish their critical
> sections. There is at least one critical section in which we try
> to get QGM (the critical section is in address_space_rw() and
> prepare_mmio_access() is trying to acquire QGM).
> 
> Both functions (migration_end() and migration_bitmap_extend())
> are called from main thread which is holding QGM.
> 
> Thus there is a race condition that ends up with deadlock:
> main thread     working thread
> Lock QGM                |
> |             Call KVM_EXIT_IO handler
> |                       |
> |        Open rcu reader's critical section
> Migration cleanup bh    |
> |                       |
> synchronize_rcu() is    |
> waiting for readers     |
> |            prepare_mmio_access() is waiting for QGM
>   \                   /
>          deadlock
> 
> The patch changes bitmap freeing from direct g_free after synchronize_rcu
> to g_free_rcu.
> 
> Signed-off-by: Denis V. Lunev <address@hidden>
> Reported-by: Igor Redko <address@hidden>
> CC: Igor Redko <address@hidden>
> CC: Anna Melekhova <address@hidden>
> CC: Juan Quintela <address@hidden>
> CC: Amit Shah <address@hidden>
> CC: Paolo Bonzini <address@hidden>
> CC: Wen Congyang <address@hidden>
> ---
>  migration/ram.c | 43 ++++++++++++++++++++++++++++---------------
>  1 file changed, 28 insertions(+), 15 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index a712c68..56b6fce 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -221,12 +221,27 @@ static RAMBlock *last_seen_block;
>  /* This is the last block from where we have sent data */
>  static RAMBlock *last_sent_block;
>  static ram_addr_t last_offset;
> -static unsigned long *migration_bitmap;
>  static QemuMutex migration_bitmap_mutex;
>  static uint64_t migration_dirty_pages;
>  static uint32_t last_version;
>  static bool ram_bulk_stage;
>  
> +static struct BitmapRcu {
> +    struct rcu_head rcu;
> +    unsigned long bmap[0];
> +} *migration_bitmap_rcu;
> +
> +static inline struct BitmapRcu *bitmap_new_rcu(long nbits)
> +{
> +    long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
> +    struct BitmapRcu *ptr = g_try_malloc0(len + sizeof(struct BitmapRcu));

It would be better to allocate memory twice: once for the BitmapRcu struct
itself, and once by calling bitmap_new(). That way the user doesn't need to
know how the bitmap is implemented.

> +    if (ptr == NULL) {
> +        abort();
> +    }
> +    return ptr;
> +}
> +
> +
>  struct CompressParam {
>      bool start;
>      bool done;
> @@ -508,7 +523,7 @@ ram_addr_t 
> migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
>  
>      unsigned long next;
>  
> -    bitmap = atomic_rcu_read(&migration_bitmap);
> +    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

>      if (ram_bulk_stage && nr > base) {
>          next = nr + 1;
>      } else {
> @@ -526,7 +541,7 @@ ram_addr_t 
> migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
>  static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>  {
>      unsigned long *bitmap;
> -    bitmap = atomic_rcu_read(&migration_bitmap);
> +    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
>      migration_dirty_pages +=
>          cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
>  }
> @@ -1029,12 +1044,11 @@ static void migration_end(void)
>      /* caller have hold iothread lock or is in a bh, so there is
>       * no writing race against this migration_bitmap
>       */
> -    unsigned long *bitmap = migration_bitmap;
> -    atomic_rcu_set(&migration_bitmap, NULL);
> +    struct BitmapRcu *bitmap = migration_bitmap_rcu;
> +    atomic_rcu_set(&migration_bitmap_rcu, NULL);
>      if (bitmap) {
>          memory_global_dirty_log_stop();
> -        synchronize_rcu();
> -        g_free(bitmap);
> +        g_free_rcu(bitmap, rcu);
>      }
>  
>      XBZRLE_cache_lock();
> @@ -1070,9 +1084,9 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t 
> new)
>      /* called in qemu main thread, so there is
>       * no writing race against this migration_bitmap
>       */
> -    if (migration_bitmap) {
> -        unsigned long *old_bitmap = migration_bitmap, *bitmap;
> -        bitmap = bitmap_new(new);
> +    if (migration_bitmap_rcu) {
> +        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
> +        bitmap = bitmap_new_rcu(new);
>  
>          /* prevent migration_bitmap content from being set bit
>           * by migration_bitmap_sync_range() at the same time.
> @@ -1080,12 +1094,11 @@ void migration_bitmap_extend(ram_addr_t old, 
> ram_addr_t new)
>           * at the same time.
>           */
>          qemu_mutex_lock(&migration_bitmap_mutex);
> -        bitmap_copy(bitmap, old_bitmap, old);
> -        atomic_rcu_set(&migration_bitmap, bitmap);
> +        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
> +        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
>          qemu_mutex_unlock(&migration_bitmap_mutex);
>          migration_dirty_pages += new - old;
> -        synchronize_rcu();
> -        g_free(old_bitmap);
> +        g_free_rcu(old_bitmap, rcu);
>      }
>  }
>  
> @@ -1144,7 +1157,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>      reset_ram_globals();
>  
>      ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
> -    migration_bitmap = bitmap_new(ram_bitmap_pages);
> +    migration_bitmap_rcu = bitmap_new_rcu(ram_bitmap_pages);
>  
>      /*
>       * Count the total number of pages used by ram blocks not including any
> 




reply via email to

[Prev in Thread] Current Thread [Next in Thread]