[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v10 08/14] migration: Transfer pages over new channels
From: |
Juan Quintela |
Subject: |
[Qemu-devel] [PATCH v10 08/14] migration: Transfer pages over new channels |
Date: |
Wed, 10 Jan 2018 13:47:17 +0100 |
We switch from sending the page number to sending the real pages.
Signed-off-by: Juan Quintela <address@hidden>
--
Remove the HACK bit, now we have the function that calculates the size
of a page exported.
Rename multifd_pages{_now} to sent_pages{_now}
Remove multifd pages field, it is the same as normal pages
Merge test channels here
Make sent_pages also work for non multifd case
---
migration/migration.c | 10 +++++++++-
migration/ram.c | 41 +++++++++++++++++++++++++++++------------
2 files changed, 38 insertions(+), 13 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index 1545f3a0b0..6ebcfa36cc 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2230,6 +2230,8 @@ static void *migration_thread(void *opaque)
*/
int64_t threshold_size = 0;
int64_t qemu_file_bytes = 0;
+ /* Stores how many pages we have sent */
+ int64_t sent_pages = 0;
int64_t start_time = initial_time;
int64_t end_time;
bool old_vm_running = false;
@@ -2318,8 +2320,13 @@ static void *migration_thread(void *opaque)
current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (current_time >= initial_time + BUFFER_DELAY) {
uint64_t qemu_file_bytes_now = qemu_ftell(s->to_dst_file);
+ uint64_t sent_pages_now = ram_counters.normal;
+ /* multifd sends data out of the qemu_file */
+ uint64_t multifd_transferred = migrate_use_multifd() ?
+ (sent_pages_now - sent_pages) * qemu_target_page_size() : 0;
uint64_t transferred_bytes =
- qemu_file_bytes_now - qemu_file_bytes;
+ (qemu_file_bytes_now - qemu_file_bytes) +
+ multifd_transferred;
uint64_t time_spent = current_time - initial_time;
double bandwidth = (double)transferred_bytes / time_spent;
threshold_size = bandwidth * s->parameters.downtime_limit;
@@ -2339,6 +2346,7 @@ static void *migration_thread(void *opaque)
qemu_file_reset_rate_limit(s->to_dst_file);
initial_time = current_time;
qemu_file_bytes = qemu_file_bytes_now;
+ sent_pages = sent_pages_now;
}
if (qemu_file_rate_limit(s->to_dst_file)) {
/* usleep expects microseconds */
diff --git a/migration/ram.c b/migration/ram.c
index 8443806f12..20f3726909 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -542,12 +542,20 @@ static void *multifd_send_thread(void *opaque)
break;
}
if (p->pages->used) {
+ Error *local_err = NULL;
+ size_t ret;
+ uint32_t used;
+
+ used = p->pages->used;
p->pages->used = 0;
qemu_mutex_unlock(&p->mutex);
- trace_multifd_send(p->id, p->pages->seq, p->pages->used);
- /* ToDo: send page here */
-
+ trace_multifd_send(p->id, p->pages->seq, used);
+ ret = qio_channel_writev_all(p->c, p->pages->iov, used,
&local_err);
+ if (ret != 0) {
+ terminate_multifd_send_threads(local_err);
+ return NULL;
+ }
qemu_mutex_lock(&multifd_send_state->mutex);
p->done = true;
p->packets_sent++;
@@ -754,12 +762,21 @@ static void *multifd_recv_thread(void *opaque)
break;
}
if (p->pages->used) {
+ Error *local_err = NULL;
+ size_t ret;
+ uint32_t used;
+
+ used = p->pages->used;
p->pages->used = 0;
+ qemu_mutex_unlock(&p->mutex);
- trace_multifd_recv(p->id, p->pages->seq, p->pages->used);
-
- /* ToDo: receive pages here */
-
+ trace_multifd_recv(p->id, p->pages->seq, used);
+ ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);
+ if (ret != 0) {
+ terminate_multifd_recv_threads(local_err);
+ return NULL;
+ }
+ qemu_mutex_lock(&p->mutex);
p->done = true;
p->packets_recv++;
qemu_mutex_unlock(&p->mutex);
@@ -1311,12 +1328,9 @@ static int ram_multifd_page(RAMState *rs,
PageSearchStatus *pss,
{
int pages;
uint16_t fd_num;
- uint8_t *p;
RAMBlock *block = pss->block;
ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
- p = block->host + offset;
-
pages = save_zero_page(rs, block, offset);
if (pages == -1) {
ram_counters.transferred +=
@@ -1325,8 +1339,12 @@ static int ram_multifd_page(RAMState *rs,
PageSearchStatus *pss,
fd_num = multifd_send_page(block, offset,
rs->migration_dirty_pages == 1);
qemu_put_be16(rs->f, fd_num);
+ if (fd_num != MULTIFD_CONTINUE) {
+ /* We start with a different channel.
+ Flush pending work */
+ qemu_fflush(rs->f);
+ }
ram_counters.transferred += 2; /* size of fd_num */
- qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
ram_counters.transferred += TARGET_PAGE_SIZE;
pages = 1;
ram_counters.normal++;
@@ -3278,7 +3296,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
case RAM_SAVE_FLAG_MULTIFD_PAGE:
fd_num = qemu_get_be16(f);
multifd_recv_page(block, addr, host, fd_num);
- qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_EOS:
--
2.14.3
- Re: [Qemu-devel] [PATCH v10 01/14] migration: Make migrate_fd_error() the owner of the Error, (continued)
- [Qemu-devel] [PATCH v10 03/14] migration: Drop current address parameter from save_zero_page(), Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 05/14] migration: Create ram_multifd_page, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 07/14] migration: Create thread infrastructure for multifd recv side, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 04/14] migration: Start of multiple fd work, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 08/14] migration: Transfer pages over new channels,
Juan Quintela <=
- [Qemu-devel] [PATCH v10 06/14] migration: Send the fd number which we are going to use for this page, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 11/14] LOCAL: use trace events for migration-test, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 10/14] migration: Add multifd test, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 09/14] migration: Flush receive queue, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 12/14] migration: Sent the page list over the normal thread, Juan Quintela, 2018/01/10
- [Qemu-devel] [PATCH v10 13/14] migration: Add multifd_send_packet trace, Juan Quintela, 2018/01/10