[PATCH 04/14] ram.c: Do not call save_page_header() from compress threads


From: Lukas Straub
Subject: [PATCH 04/14] ram.c: Do not call save_page_header() from compress threads
Date: Sun, 2 Apr 2023 17:56:14 +0000

save_page_header() accesses several global variables, so calling it
from multiple threads is pretty ugly.

Instead, call save_page_header() before writing out the compressed
data from the compress buffer to the migration stream.

This also makes the core compress code more independent of ram.c.
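
For readers outside QEMU, the pattern being adopted here is the classic
single-writer one: workers compress into private per-thread buffers and
record a result tag, and only the migration thread, which owns the stream,
writes the page header before draining a buffer. Below is a minimal
standalone sketch of that pattern (hypothetical names, not the QEMU API;
plain pthreads and stdio stand in for QEMU's thread and QEMUFile helpers):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    enum result { RES_NONE, RES_ZEROPAGE, RES_COMPRESS };

    struct param {
        pthread_mutex_t lock;
        enum result result;       /* set by worker, consumed by writer */
        unsigned char buf[4096];  /* per-thread output buffer */
        size_t len;
        unsigned long offset;     /* page offset, needed for the header */
    };

    /* Worker thread: fills its private buffer only; it never touches
     * shared stream state such as the page header. */
    static void *compress_worker(void *opaque)
    {
        struct param *p = opaque;

        pthread_mutex_lock(&p->lock);
        /* Pretend the page was non-zero and compressed to a short payload. */
        memcpy(p->buf, "payload", 7);
        p->len = 7;
        p->result = RES_COMPRESS;
        pthread_mutex_unlock(&p->lock);
        return NULL;
    }

    /* Migration thread: the only place a page header is emitted. */
    static void send_queued_data(struct param *p, FILE *stream)
    {
        pthread_mutex_lock(&p->lock);
        switch (p->result) {
        case RES_ZEROPAGE:
            fprintf(stream, "[hdr: zero page @ 0x%lx]\n", p->offset);
            break;
        case RES_COMPRESS:
            fprintf(stream, "[hdr: compressed page @ 0x%lx] ", p->offset);
            fwrite(p->buf, 1, p->len, stream);
            fputc('\n', stream);
            break;
        case RES_NONE:
            break;                /* nothing queued on this worker */
        }
        p->result = RES_NONE;
        pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
        struct param p = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .offset = 0x1000 };
        pthread_t tid;

        pthread_create(&tid, NULL, compress_worker, &p);
        pthread_join(tid, NULL);
        send_queued_data(&p, stdout);  /* header + payload, one writer */
        return 0;
    }

The important property is that each page's header and payload still reach
the stream back to back, while all shared stream state stays on one thread.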

Signed-off-by: Lukas Straub <lukasstraub2@web.de>
---
 migration/ram.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index bef6292ef7..7ab008145b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1476,17 +1476,13 @@ static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
                                            RAMBlock *block, ram_addr_t offset,
                                            uint8_t *source_buf)
 {
-    RAMState *rs = ram_state;
-    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
     uint8_t *p = block->host + offset;
     int ret;

-    if (save_zero_page_to_file(pss, f, block, offset)) {
+    if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
         return RES_ZEROPAGE;
     }

-    save_page_header(pss, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
-
     /*
      * copy it to a internal buffer to avoid it being modified by VM
      * so that we can catch up the error during compression and
@@ -1526,9 +1522,40 @@ static inline void compress_reset_result(CompressParam *param)
     param->offset = 0;
 }

-static void flush_compressed_data(RAMState *rs)
+static int send_queued_data(CompressParam *param)
 {
+    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
     MigrationState *ms = migrate_get_current();
+    QEMUFile *file = ms->to_dst_file;
+    int len = 0;
+
+    RAMBlock *block = param->block;
+    ram_addr_t offset = param->offset;
+
+    if (param->result == RES_NONE) {
+        return 0;
+    }
+
+    assert(block == pss->last_sent_block);
+
+    if (param->result == RES_ZEROPAGE) {
+        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
+        qemu_put_byte(file, 0);
+        len += 1;
+        ram_release_page(block->idstr, offset);
+    } else if (param->result == RES_COMPRESS) {
+        len += save_page_header(pss, file, block,
+                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+        len += qemu_put_qemu_file(file, param->file);
+    } else {
+        abort();
+    }
+
+    return len;
+}
+
+static void flush_compressed_data(RAMState *rs)
+{
     int idx, len, thread_count;

     if (!save_page_use_compression(rs)) {
@@ -1548,7 +1575,7 @@ static void flush_compressed_data(RAMState *rs)
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             CompressParam *param = &comp_param[idx];
-            len = qemu_put_qemu_file(ms->to_dst_file, param->file);
+            len = send_queued_data(param);
             compress_reset_result(param);

             /*
@@ -1574,7 +1601,6 @@ static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
     bool wait = migrate_compress_wait_thread();
-    MigrationState *ms = migrate_get_current();

     thread_count = migrate_compress_threads();
     qemu_mutex_lock(&comp_done_lock);
@@ -1584,7 +1610,7 @@ retry:
             CompressParam *param = &comp_param[idx];
             qemu_mutex_lock(&param->mutex);
             param->done = false;
-            bytes_xmit = qemu_put_qemu_file(ms->to_dst_file, param->file);
+            bytes_xmit = send_queued_data(param);
             compress_reset_result(param);
             set_compress_params(param, block, offset);

--
2.30.2
