[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCHv4 7/9] migration: do not send zero pages in bulk sta
From: |
Peter Lieven |
Subject: |
[Qemu-devel] [PATCHv4 7/9] migration: do not send zero pages in bulk stage |
Date: |
Fri, 22 Mar 2013 13:46:08 +0100 |
During the bulk stage of RAM migration, if a page is a
zero page, do not send it at all.
The memory at the destination reads as zero anyway.
Even if there is an madvise with QEMU_MADV_DONTNEED
at the target upon receipt of a zero page, I have observed
that the target starts swapping if the memory is overcommitted.
It seems that the pages are dropped asynchronously.
Signed-off-by: Peter Lieven <address@hidden>
---
arch_init.c | 24 ++++++++++++++++++++----
hmp.c | 2 ++
include/migration/migration.h | 2 ++
migration.c | 3 ++-
qapi-schema.json | 6 ++++--
qmp-commands.hx | 3 ++-
6 files changed, 32 insertions(+), 8 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 4c4caf4..c34a4af 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -181,6 +181,7 @@ int64_t xbzrle_cache_resize(int64_t new_size)
/* accounting for migration statistics */
typedef struct AccountingInfo {
uint64_t dup_pages;
+ uint64_t skipped_pages;
uint64_t norm_pages;
uint64_t iterations;
uint64_t xbzrle_bytes;
@@ -206,6 +207,16 @@ uint64_t dup_mig_pages_transferred(void)
return acct_info.dup_pages;
}
+uint64_t skipped_mig_bytes_transferred(void)
+{
+ return acct_info.skipped_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t skipped_mig_pages_transferred(void)
+{
+ return acct_info.skipped_pages;
+}
+
uint64_t norm_mig_bytes_transferred(void)
{
return acct_info.norm_pages * TARGET_PAGE_SIZE;
@@ -438,10 +449,15 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
bytes_sent = -1;
if (is_zero_page(p)) {
acct_info.dup_pages++;
- bytes_sent = save_block_hdr(f, block, offset, cont,
- RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, 0);
- bytes_sent++;
+ if (!ram_bulk_stage) {
+ bytes_sent = save_block_hdr(f, block, offset, cont,
+ RAM_SAVE_FLAG_COMPRESS);
+ qemu_put_byte(f, 0);
+ bytes_sent++;
+ } else {
+ acct_info.skipped_pages++;
+ bytes_sent = 0;
+ }
} else if (migrate_use_xbzrle()) {
current_addr = block->offset + offset;
bytes_sent = save_xbzrle_page(f, p, current_addr, block,
diff --git a/hmp.c b/hmp.c
index b0a861c..e3e833e 100644
--- a/hmp.c
+++ b/hmp.c
@@ -173,6 +173,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->ram->total >> 10);
monitor_printf(mon, "duplicate: %" PRIu64 " pages\n",
info->ram->duplicate);
+ monitor_printf(mon, "skipped: %" PRIu64 " pages\n",
+ info->ram->skipped);
monitor_printf(mon, "normal: %" PRIu64 " pages\n",
info->ram->normal);
monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
diff --git a/include/migration/migration.h b/include/migration/migration.h
index bb617fd..e2acec6 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -96,6 +96,8 @@ extern SaveVMHandlers savevm_ram_handlers;
uint64_t dup_mig_bytes_transferred(void);
uint64_t dup_mig_pages_transferred(void);
+uint64_t skipped_mig_bytes_transferred(void);
+uint64_t skipped_mig_pages_transferred(void);
uint64_t norm_mig_bytes_transferred(void);
uint64_t norm_mig_pages_transferred(void);
uint64_t xbzrle_mig_bytes_transferred(void);
diff --git a/migration.c b/migration.c
index 185d112..7fb2147 100644
--- a/migration.c
+++ b/migration.c
@@ -197,11 +197,11 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->ram->remaining = ram_bytes_remaining();
info->ram->total = ram_bytes_total();
info->ram->duplicate = dup_mig_pages_transferred();
+ info->ram->skipped = skipped_mig_pages_transferred();
info->ram->normal = norm_mig_pages_transferred();
info->ram->normal_bytes = norm_mig_bytes_transferred();
info->ram->dirty_pages_rate = s->dirty_pages_rate;
-
if (blk_mig_active()) {
info->has_disk = true;
info->disk = g_malloc0(sizeof(*info->disk));
@@ -227,6 +227,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->ram->remaining = 0;
info->ram->total = ram_bytes_total();
info->ram->duplicate = dup_mig_pages_transferred();
+ info->ram->skipped = skipped_mig_pages_transferred();
info->ram->normal = norm_mig_pages_transferred();
info->ram->normal_bytes = norm_mig_bytes_transferred();
break;
diff --git a/qapi-schema.json b/qapi-schema.json
index fdaa9da..b737460 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -496,7 +496,9 @@
#
# @total: total amount of bytes involved in the migration process
#
-# @duplicate: number of duplicate pages (since 1.2)
+# @duplicate: number of duplicate (zero) pages (since 1.2)
+#
+# @skipped: number of skipped zero pages (since 1.5)
#
# @normal : number of normal pages (since 1.2)
#
@@ -510,7 +512,7 @@
{ 'type': 'MigrationStats',
'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
'duplicate': 'int', 'normal': 'int', 'normal-bytes': 'int',
- 'dirty-pages-rate' : 'int' } }
+ 'dirty-pages-rate' : 'int', 'skipped': 'int' } }
##
# @XBZRLECacheStats
diff --git a/qmp-commands.hx b/qmp-commands.hx
index b370060..fed74c6 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -2442,7 +2442,8 @@ The main json-object contains the following:
- "transferred": amount transferred (json-int)
- "remaining": amount remaining (json-int)
- "total": total (json-int)
- - "duplicate": number of duplicated pages (json-int)
+ - "duplicate": number of duplicated (zero) pages (json-int)
+ - "skipped": number of skipped zero pages (json-int)
- "normal" : number of normal pages transferred (json-int)
- "normal-bytes" : number of normal bytes transferred (json-int)
- "disk": only present if "status" is "active" and it is a block migration,
--
1.7.9.5
[Qemu-devel] [PATCHv4 7/9] migration: do not send zero pages in bulk stage,
Peter Lieven <=
Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Paolo Bonzini, 2013/03/22
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Peter Lieven, 2013/03/22
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Paolo Bonzini, 2013/03/22
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Peter Lieven, 2013/03/23
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Peter Lieven, 2013/03/25
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Paolo Bonzini, 2013/03/25
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Peter Lieven, 2013/03/25
- Re: [Qemu-devel] [PATCHv4 0/9] buffer_is_zero / migration optimizations, Paolo Bonzini, 2013/03/25