qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v7 12/12] Use multifd to transmit zero pages.


From: Juan Quintela
Subject: [PATCH v7 12/12] Use multifd to transmit zero pages.
Date: Tue, 2 Aug 2022 08:39:07 +0200

Signed-off-by: Juan Quintela <quintela@redhat.com>

---

- Check zero_page property before using new code (Dave)
---
 migration/migration.c |  4 +---
 migration/multifd.c   |  6 +++---
 migration/ram.c       | 33 ++++++++++++++++++++++++++++++++-
 3 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index ce3e5cc0cd..13842f6803 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2599,9 +2599,7 @@ bool migrate_use_main_zero_page(void)
 
     s = migrate_get_current();
 
-    // We will enable this when we add the right code.
-    // return s->enabled_capabilities[MIGRATION_CAPABILITY_MAIN_ZERO_PAGE];
-    return true;
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_MAIN_ZERO_PAGE];
 }
 
 bool migrate_pause_before_switchover(void)
diff --git a/migration/multifd.c b/migration/multifd.c
index 89811619d8..54acdc004c 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -667,8 +667,8 @@ static void *multifd_send_thread(void *opaque)
 {
     MultiFDSendParams *p = opaque;
     Error *local_err = NULL;
-    /* qemu older than 7.0 don't understand zero page on multifd channel */
-    bool use_zero_page = migrate_use_multifd_zero_page();
+    /* older QEMU versions don't understand zero pages on the multifd channel */
+    bool use_multifd_zero_page = !migrate_use_main_zero_page();
     int ret = 0;
     bool use_zero_copy_send = migrate_use_zero_copy_send();
 
@@ -711,7 +711,7 @@ static void *multifd_send_thread(void *opaque)
 
             for (int i = 0; i < p->pages->num; i++) {
                 uint64_t offset = p->pages->offset[i];
-                if (use_zero_page &&
+                if (use_multifd_zero_page &&
                     buffer_is_zero(rb->host + offset, p->page_size)) {
                     p->zero[p->zero_num] = offset;
                     p->zero_num++;
diff --git a/migration/ram.c b/migration/ram.c
index 2af70f517a..26e60b9cc1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2428,6 +2428,32 @@ static void postcopy_preempt_reset_channel(RAMState *rs)
     }
 }
 
+/**
+ * ram_save_target_page_multifd: save one target page
+ *
+ * Returns the number of pages written
+ *
+ * @rs: current RAM state
+ * @pss: data about the page we want to send
+ */
+static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
+{
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
+    int res;
+
+    if (!migration_in_postcopy()) {
+        return ram_save_multifd_page(rs, block, offset);
+    }
+
+    res = save_zero_page(rs, block, offset);
+    if (res > 0) {
+        return res;
+    }
+
+    return ram_save_page(rs, pss);
+}
+
 /**
  * ram_save_host_page: save a whole host page
  *
@@ -3225,7 +3251,12 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
 
-    (*rsp)->ram_save_target_page = ram_save_target_page_legacy;
+    if (migrate_use_multifd() && !migrate_use_main_zero_page()) {
+        (*rsp)->ram_save_target_page = ram_save_target_page_multifd;
+    } else {
+        (*rsp)->ram_save_target_page = ram_save_target_page_legacy;
+    }
+
     ret =  multifd_send_sync_main(f);
     if (ret < 0) {
         return ret;
-- 
2.37.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]