From: Li Zhijian
Subject: [Qemu-devel] [PATCH v2 1/2] migration: protect migration_bitmap
Date: Fri, 26 Jun 2015 18:09:39 +0800

Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Wen Congyang <address@hidden>
---
 migration/ram.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 57368e1..4754aa9 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -222,6 +222,7 @@ static RAMBlock *last_seen_block;
 static RAMBlock *last_sent_block;
 static ram_addr_t last_offset;
 static unsigned long *migration_bitmap;
+static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
 static uint32_t last_version;
 static bool ram_bulk_stage;
@@ -494,6 +495,7 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
     return 1;
 }
 
+/* Called with rcu_read_lock() to protect migration_bitmap */
 static inline
 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                  ram_addr_t start)
@@ -502,26 +504,31 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
     uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
     unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+    unsigned long *bitmap;
 
     unsigned long next;
 
+    bitmap = atomic_rcu_read(&migration_bitmap);
     if (ram_bulk_stage && nr > base) {
         next = nr + 1;
     } else {
-        next = find_next_bit(migration_bitmap, size, nr);
+        next = find_next_bit(bitmap, size, nr);
     }
 
     if (next < size) {
-        clear_bit(next, migration_bitmap);
+        clear_bit(next, bitmap);
         migration_dirty_pages--;
     }
     return (next - base) << TARGET_PAGE_BITS;
 }
 
+/* Called with rcu_read_lock() to protect migration_bitmap */
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
+    unsigned long *bitmap;
+    bitmap = atomic_rcu_read(&migration_bitmap);
     migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length);
+        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
 }
 
 
@@ -1017,10 +1024,15 @@ void free_xbzrle_decoded_buf(void)
 
 static void migration_end(void)
 {
-    if (migration_bitmap) {
+    unsigned long *bitmap;
+    qemu_mutex_lock(&migration_bitmap_mutex);
+    bitmap = migration_bitmap;
+    atomic_rcu_set(&migration_bitmap, NULL);
+    qemu_mutex_unlock(&migration_bitmap_mutex);
+    if (bitmap) {
         memory_global_dirty_log_stop();
-        g_free(migration_bitmap);
-        migration_bitmap = NULL;
+        synchronize_rcu();
+        g_free(bitmap);
     }
 
     XBZRLE_cache_lock();
@@ -1067,6 +1079,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     dirty_rate_high_cnt = 0;
     bitmap_sync_count = 0;
     migration_bitmap_sync_init();
+    qemu_mutex_init(&migration_bitmap_mutex);
 
     if (migrate_use_xbzrle()) {
         XBZRLE_cache_lock();
-- 
2.1.4
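
For reference, the pointer-publication pattern the patch relies on can be summarized outside the diff. The sketch below is illustrative only, assuming QEMU's RCU and mutex primitives of that era ("qemu/rcu.h", "qemu/thread.h", "qemu/bitops.h", glib via "qemu-common.h"); the example_* helpers are hypothetical and not part of the patch. Readers dereference the bitmap through atomic_rcu_read() inside an RCU read-side critical section, while the updater unpublishes the pointer under migration_bitmap_mutex with atomic_rcu_set() and only frees the old bitmap once synchronize_rcu() guarantees no reader can still see it.

    /* Minimal sketch, not part of the patch; helper names are hypothetical. */
    #include "qemu-common.h"   /* assumption: pulls in glib for g_free() */
    #include "qemu/rcu.h"
    #include "qemu/thread.h"
    #include "qemu/bitops.h"

    static unsigned long *migration_bitmap;
    static QemuMutex migration_bitmap_mutex;

    /* Reader side: caller holds rcu_read_lock(), never blocks the updater. */
    static bool example_bitmap_test(unsigned long nr)
    {
        unsigned long *bitmap = atomic_rcu_read(&migration_bitmap);

        return bitmap && test_bit(nr, bitmap);
    }

    /* Example caller (hypothetical), showing the read-side critical section. */
    static bool example_page_is_dirty(unsigned long nr)
    {
        bool dirty;

        rcu_read_lock();
        dirty = example_bitmap_test(nr);
        rcu_read_unlock();
        return dirty;
    }

    /* Updater side: unpublish under the mutex, then wait for readers to drain. */
    static void example_bitmap_free(void)
    {
        unsigned long *bitmap;

        qemu_mutex_lock(&migration_bitmap_mutex);
        bitmap = migration_bitmap;
        atomic_rcu_set(&migration_bitmap, NULL);
        qemu_mutex_unlock(&migration_bitmap_mutex);

        if (bitmap) {
            synchronize_rcu();   /* all pre-existing readers have finished */
            g_free(bitmap);
        }
    }

The mutex only serializes updaters against each other; readers stay lock-free and are ordered against the free solely by RCU, which is why migration_end() in the patch drops the mutex before calling synchronize_rcu().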



