[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 6/7] memory: make global_dirty_log a bitmask
From: huangy81
Subject: [PATCH v3 6/7] memory: make global_dirty_log a bitmask
Date: Mon, 7 Jun 2021 09:13:12 +0800
From: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
Dirty-rate measurement may start or stop dirty logging while the
calculation is in progress. This conflicts with migration, because
stopping dirty logging makes migration miss dirty pages, which is a
problem. Making global_dirty_log a bitmask lets migration and
dirty-rate measurement each enable and disable dirty logging
independently. Introduce GLOBAL_DIRTY_MIGRATION and
GLOBAL_DIRTY_DIRTY_RATE to distinguish what the current dirty logging
is for: migration or dirty-rate measurement.
All existing references to global_dirty_log can stay untouched, since
any bit being set there means global dirty logging is enabled.
Signed-off-by: Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
---
include/exec/memory.h | 13 ++++++++++---
migration/ram.c | 8 ++++----
softmmu/memory.c | 36 +++++++++++++++++++++++++++---------
3 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index c158fd7084..94c7088299 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -55,7 +55,10 @@ static inline void fuzz_dma_read_cb(size_t addr,
}
#endif
-extern bool global_dirty_log;
+#define GLOBAL_DIRTY_MIGRATION (1U<<0)
+#define GLOBAL_DIRTY_DIRTY_RATE (1U<<1)
+
+extern int global_dirty_log;
typedef struct MemoryRegionOps MemoryRegionOps;
@@ -2099,13 +2102,17 @@ void memory_listener_unregister(MemoryListener
*listener);
/**
* memory_global_dirty_log_start: begin dirty logging for all regions
+ *
+ * @flags: purpose of start dirty log, migration or dirty rate
*/
-void memory_global_dirty_log_start(void);
+void memory_global_dirty_log_start(int flags);
/**
* memory_global_dirty_log_stop: end dirty logging for all regions
+ *
+ * @flags: purpose of stop dirty log, migration or dirty rate
*/
-void memory_global_dirty_log_stop(void);
+void memory_global_dirty_log_stop(int flags);
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
diff --git a/migration/ram.c b/migration/ram.c
index 60ea913c54..9ce31af9d1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2190,7 +2190,7 @@ static void ram_save_cleanup(void *opaque)
/* caller have hold iothread lock or is in a bh, so there is
* no writing race against the migration bitmap
*/
- memory_global_dirty_log_stop();
+ memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
}
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
@@ -2652,7 +2652,7 @@ static void ram_init_bitmaps(RAMState *rs)
ram_list_init_bitmaps();
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
- memory_global_dirty_log_start();
+ memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
migration_bitmap_sync_precopy(rs);
}
}
@@ -3393,7 +3393,7 @@ void colo_incoming_start_dirty_log(void)
/* Discard this dirty bitmap record */
bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
}
- memory_global_dirty_log_start();
+ memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
}
ram_state->migration_dirty_pages = 0;
qemu_mutex_unlock_ramlist();
@@ -3405,7 +3405,7 @@ void colo_release_ram_cache(void)
{
RAMBlock *block;
- memory_global_dirty_log_stop();
+ memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
g_free(block->bmap);
block->bmap = NULL;
diff --git a/softmmu/memory.c b/softmmu/memory.c
index c19b0be6b1..b93baba82d 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -39,7 +39,7 @@
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
-bool global_dirty_log;
+int global_dirty_log;
static QTAILQ_HEAD(, MemoryListener) memory_listeners
= QTAILQ_HEAD_INITIALIZER(memory_listeners);
@@ -2659,14 +2659,20 @@ void memory_global_after_dirty_log_sync(void)
static VMChangeStateEntry *vmstate_change;
-void memory_global_dirty_log_start(void)
+void memory_global_dirty_log_start(int flags)
{
if (vmstate_change) {
qemu_del_vm_change_state_handler(vmstate_change);
vmstate_change = NULL;
}
- global_dirty_log = true;
+ if (flags & GLOBAL_DIRTY_MIGRATION) {
+ global_dirty_log |= GLOBAL_DIRTY_MIGRATION;
+ }
+
+ if (flags & GLOBAL_DIRTY_DIRTY_RATE) {
+ global_dirty_log |= GLOBAL_DIRTY_DIRTY_RATE;
+ }
MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
@@ -2676,9 +2682,15 @@ void memory_global_dirty_log_start(void)
memory_region_transaction_commit();
}
-static void memory_global_dirty_log_do_stop(void)
+static void memory_global_dirty_log_do_stop(int flags)
{
- global_dirty_log = false;
+ if (flags & GLOBAL_DIRTY_MIGRATION) {
+ global_dirty_log &= ~GLOBAL_DIRTY_MIGRATION;
+ }
+
+ if (flags & GLOBAL_DIRTY_DIRTY_RATE) {
+ global_dirty_log &= ~GLOBAL_DIRTY_DIRTY_RATE;
+ }
/* Refresh DIRTY_MEMORY_MIGRATION bit. */
memory_region_transaction_begin();
@@ -2691,8 +2703,10 @@ static void memory_global_dirty_log_do_stop(void)
static void memory_vm_change_state_handler(void *opaque, bool running,
RunState state)
{
+ int *flags = opaque;
if (running) {
- memory_global_dirty_log_do_stop();
+ memory_global_dirty_log_do_stop(*flags);
+ g_free(opaque);
if (vmstate_change) {
qemu_del_vm_change_state_handler(vmstate_change);
@@ -2701,18 +2715,22 @@ static void memory_vm_change_state_handler(void
*opaque, bool running,
}
}
-void memory_global_dirty_log_stop(void)
+void memory_global_dirty_log_stop(int flags)
 {
+    int *opaque = NULL;
 if (!runstate_is_running()) {
 if (vmstate_change) {
 return;
 }
+
+        /* sizeof(*opaque), not sizeof(opaque): allocate space for the
+         * pointed-to int, not for the pointer itself. */
+        opaque = g_malloc0(sizeof(*opaque));
+        *opaque = flags;
 vmstate_change = qemu_add_vm_change_state_handler(
-        memory_vm_change_state_handler, NULL);
+        memory_vm_change_state_handler, opaque);
 return;
 }
-    memory_global_dirty_log_do_stop();
+    memory_global_dirty_log_do_stop(flags);
 }
static void listener_add_address_space(MemoryListener *listener,
--
2.18.2
[PATCH v3 1/7] migration/dirtyrate: make sample page count configurable, huangy81, 2021/06/06
[PATCH v3 2/7] hmp: Add "calc_dirty_rate" and "info dirty_rate" cmds, huangy81, 2021/06/06