[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH COLO-Frame v17 10/34] COLO: Load PVM's dirty pages into SVM's RAM cache temporarily
From: |
zhanghailiang |
Subject: |
[Qemu-devel] [PATCH COLO-Frame v17 10/34] COLO: Load PVM's dirty pages into SVM's RAM cache temporarily |
Date: |
Fri, 3 Jun 2016 15:52:22 +0800 |
We should not load PVM's state directly into SVM, because there may be
errors while SVM is receiving data, which would break SVM.
We need to ensure that all data has been received before loading the state into SVM. We use
an extra memory to cache these data (PVM's ram). The ram cache in secondary side
is initially the same as SVM/PVM's memory. During each checkpoint, we first
cache PVM's dirty pages in this ram cache, so the ram cache is always the
same as PVM's memory at every checkpoint; we then flush this cached ram
to SVM after we have received all of PVM's state.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Gonglei <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
---
v12:
- Fix minor error in error_report (Dave's comment)
- Add Reviewed-by tag
v11:
- Rename 'host_cache' to 'colo_cache' (Dave's suggestion)
v10:
- Split the process of dirty pages recording into a new patch
---
include/exec/ram_addr.h | 1 +
include/migration/migration.h | 4 +++
migration/colo.c | 11 +++++++
migration/ram.c | 73 ++++++++++++++++++++++++++++++++++++++++++-
4 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 2a9465d..b4c04fb 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -26,6 +26,7 @@ struct RAMBlock {
struct rcu_head rcu;
struct MemoryRegion *mr;
uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
ram_addr_t offset;
ram_addr_t used_length;
ram_addr_t max_length;
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 55a2df6..5cd1ff1 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -360,4 +360,8 @@ int ram_save_queue_pages(MigrationState *ms, const char
*rbname,
PostcopyState postcopy_state_get(void);
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state);
+
+/* ram cache */
+int colo_init_ram_cache(void);
+void colo_release_ram_cache(void);
#endif
diff --git a/migration/colo.c b/migration/colo.c
index 8ef1a22..aa8c7e1 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -287,6 +287,7 @@ void *colo_process_incoming_thread(void *opaque)
{
MigrationIncomingState *mis = opaque;
Error *local_err = NULL;
+ int ret;
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_COLO);
@@ -303,6 +304,12 @@ void *colo_process_incoming_thread(void *opaque)
*/
qemu_file_set_blocking(mis->from_src_file, true);
+ ret = colo_init_ram_cache();
+ if (ret < 0) {
+ error_report("Failed to initialize ram cache");
+ goto out;
+ }
+
colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
&local_err);
if (local_err) {
@@ -353,6 +360,10 @@ out:
error_report_err(local_err);
}
+ qemu_mutex_lock_iothread();
+ colo_release_ram_cache();
+ qemu_mutex_unlock_iothread();
+
if (mis->to_src_file) {
qemu_fclose(mis->to_src_file);
}
diff --git a/migration/ram.c b/migration/ram.c
index ae9a656..327e872 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -227,6 +227,7 @@ static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static QemuMutex migration_bitmap_mutex;
static uint64_t migration_dirty_pages;
+static bool ram_cache_enable;
static uint32_t last_version;
static bool ram_bulk_stage;
@@ -2192,6 +2193,20 @@ static inline void *host_from_ram_block_offset(RAMBlock
*block,
return block->host + offset;
}
+static inline void *colo_cache_from_block_offset(RAMBlock *block,
+ ram_addr_t offset)
+{
+ if (!offset_in_ramblock(block, offset)) {
+ return NULL;
+ }
+ if (!block->colo_cache) {
+ error_report("%s: colo_cache is NULL in block :%s",
+ __func__, block->idstr);
+ return NULL;
+ }
+ return block->colo_cache + offset;
+}
+
/*
* If a page (or a whole RDMA chunk) has been
* determined to be zero, then zap it.
@@ -2468,7 +2483,12 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
- host = host_from_ram_block_offset(block, addr);
+ /* After going into COLO, we should load the Page into colo_cache
*/
+ if (ram_cache_enable) {
+ host = colo_cache_from_block_offset(block, addr);
+ } else {
+ host = host_from_ram_block_offset(block, addr);
+ }
if (!host) {
error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
@@ -2563,6 +2583,57 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
return ret;
}
+/*
+ * colo cache: this is for secondary VM, we cache the whole
+ * memory of the secondary VM, it will be called after first migration.
+ */
+int colo_init_ram_cache(void)
+{
+ RAMBlock *block;
+
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ block->colo_cache = qemu_anon_ram_alloc(block->used_length, NULL);
+ if (!block->colo_cache) {
+ error_report("%s: Can't alloc memory for COLO cache of block %s,"
+ "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
+ block->used_length);
+ goto out_locked;
+ }
+ memcpy(block->colo_cache, block->host, block->used_length);
+ }
+ rcu_read_unlock();
+ ram_cache_enable = true;
+ return 0;
+
+out_locked:
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+
+ rcu_read_unlock();
+ return -errno;
+}
+
+void colo_release_ram_cache(void)
+{
+ RAMBlock *block;
+
+ ram_cache_enable = false;
+
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+ rcu_read_unlock();
+}
+
static SaveVMHandlers savevm_ram_handlers = {
.save_live_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
--
1.8.3.1
- [Qemu-devel] [PATCH COLO-Frame v17 04/34] migration: Integrate COLO checkpoint process into migration, (continued)
- [Qemu-devel] [PATCH COLO-Frame v17 04/34] migration: Integrate COLO checkpoint process into migration, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 02/34] migration: Introduce capability 'x-colo' to migration, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 07/34] COLO: Implement COLO checkpoint protocol, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 09/34] COLO: Save PVM state to secondary side when do checkpoint, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 13/34] COLO: Flush PVM's cached RAM into SVM's memory, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 12/34] COLO: Load VMState into buffer before restore it, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 15/34] COLO: Synchronize PVM's state to SVM periodically, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 14/34] COLO: Add checkpoint-delay parameter for migrate-set-parameters, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 08/34] COLO: Add a new RunState RUN_STATE_COLO, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 27/34] migration/savevm: Export two helper functions for savevm process, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 10/34] COLO: Load PVM's dirty pages into SVM's RAM cache temporarily,
zhanghailiang <=
- [Qemu-devel] [PATCH COLO-Frame v17 21/34] COLO failover: Shutdown related socket fd when do failover, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 03/34] COLO: migrate colo related info to secondary node, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 11/34] ram/COLO: Record the dirty pages that SVM received, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 18/34] COLO: Implement failover work for Primary VM, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 22/34] COLO failover: Don't do failover during loading VM's state, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 29/34] COLO: Split qemu_savevm_state_begin out of checkpoint process, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 28/34] COLO: Separate the process of saving/loading ram and device state, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 25/34] savevm: Introduce two helper functions for save/find loadvm_handlers entry, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 19/34] COLO: Implement failover work for Secondary VM, zhanghailiang, 2016/06/03
- [Qemu-devel] [PATCH COLO-Frame v17 24/34] COLO: Update the global runstate after going into colo state, zhanghailiang, 2016/06/03