From: Sanidhya Kashyap
Subject: [Qemu-devel] [PATCH v3 5/7] cancel mechanism for an already running dump bitmap process
Date: Thu, 12 Jun 2014 16:06:38 +0530

Signed-off-by: Sanidhya Kashyap <address@hidden>
---
 hmp-commands.hx  | 14 ++++++++++++++
 hmp.c            |  5 +++++
 hmp.h            |  1 +
 qapi-schema.json |  8 ++++++++
 qmp-commands.hx  | 20 ++++++++++++++++++++
 savevm.c         | 19 +++++++++++++++++++
 6 files changed, 67 insertions(+)

diff --git a/hmp-commands.hx b/hmp-commands.hx
index cccd53e..a5174ea 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1804,6 +1804,20 @@ STEXI
 dumps the writable working set of a VM's memory to a file
 ETEXI
 
+    {
+        .name       = "ldbc|log-dirty-bitmap-cancel",
+        .args_type  = "",
+        .params     = "",
+        .help       = "cancel the current bitmap dump process",
+        .mhandler.cmd = hmp_log_dirty_bitmap_cancel,
+    },
+
+STEXI
+@item ldbc or log-dirty-bitmap-cancel
+@findex log-dirty-bitmap-cancel
+Cancel the current bitmap dump process
+ETEXI
+
 STEXI
 @end table
 ETEXI
diff --git a/hmp.c b/hmp.c
index a400825..fed8795 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1330,6 +1330,11 @@ void hmp_log_dirty_bitmap(Monitor *mon, const QDict *qdict)
     }
 }
 
+void hmp_log_dirty_bitmap_cancel(Monitor *mon, const QDict *qdict)
+{
+    qmp_log_dirty_bitmap_cancel(NULL);
+}
+
 void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict)
 {
     Error *err = NULL;
diff --git a/hmp.h b/hmp.h
index fbb08c5..6d4d672 100644
--- a/hmp.h
+++ b/hmp.h
@@ -94,6 +94,7 @@ void hmp_cpu_add(Monitor *mon, const QDict *qdict);
 void hmp_object_add(Monitor *mon, const QDict *qdict);
 void hmp_object_del(Monitor *mon, const QDict *qdict);
 void hmp_log_dirty_bitmap(Monitor *mon, const QDict *qdict);
+void hmp_log_dirty_bitmap_cancel(Monitor *mon, const QDict *qdict);
 void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
 void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
 void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
diff --git a/qapi-schema.json b/qapi-schema.json
index aa78540..9e07f9d 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -3100,3 +3100,11 @@
   'data'    : { 'filename'      : 'str',
                 '*epochs'       : 'int',
                 '*frequency'    : 'int' } }
+##
+# @log-dirty-bitmap-cancel:
+#
+# Cancel the dirty bitmap logging process.
+#
+# Since: 2.1
+##
+{ 'command': 'log-dirty-bitmap-cancel' }
diff --git a/qmp-commands.hx b/qmp-commands.hx
index 183a636..2a8dacc 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -3605,3 +3605,23 @@ value is 3 while that of frequency is 10.
 
 EQMP
 
+    {
+        .name       = "log-dirty-bitmap-cancel",
+        .args_type  = "",
+        .mhandler.cmd_new = qmp_marshal_input_log_dirty_bitmap_cancel,
+    },
+
+SQMP
+log-dirty-bitmap-cancel
+------------------------
+
+Cancel the current bitmap dump process.
+
+Arguments: None.
+
+Example:
+
+-> { "execute": "log-dirty-bitmap-cancel" }
+<- { "return": {} }
+
+EQMP
diff --git a/savevm.c b/savevm.c
index 40b6caf..e38eca8 100644
--- a/savevm.c
+++ b/savevm.c
@@ -1326,6 +1326,25 @@ void qmp_log_dirty_bitmap(const char *filename, bool has_epochs,
     return;
 }
 
+static void logging_bitmap_cancel(BitmapLogState *b)
+{
+    int old_state;
+    do {
+        old_state = b->state;
+        if (old_state != LOG_BITMAP_STATE_SETUP &&
+            old_state != LOG_BITMAP_STATE_ACTIVE) {
+            break;
+        }
+        logging_state_set_status(b, old_state,
+                                 LOG_BITMAP_STATE_CANCELING);
+    } while (b->state != LOG_BITMAP_STATE_CANCELING);
+}
+
+void qmp_log_dirty_bitmap_cancel(Error **errp)
+{
+    logging_bitmap_cancel(logging_current_state());
+}
+
 void qmp_xen_save_devices_state(const char *filename, Error **errp)
 {
     QEMUFile *f;
-- 
1.9.3
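
For readers following the series: below is a minimal standalone sketch (not
part of the patch) of how the cancel request raised by
qmp_log_dirty_bitmap_cancel() is meant to interact with the dump loop
introduced earlier in the series. The LOG_BITMAP_STATE_* values, the
BitmapLogState layout and the behaviour of logging_state_set_status() are
assumptions inferred from the savevm.c hunk above, not the actual
definitions from the other patches.

/* Standalone model of the cancel handshake; compile with: gcc -o cancel cancel.c */
#include <stdio.h>

enum {
    LOG_BITMAP_STATE_NONE,       /* no dump running (assumed) */
    LOG_BITMAP_STATE_SETUP,      /* dump being set up (assumed) */
    LOG_BITMAP_STATE_ACTIVE,     /* dump in progress (assumed) */
    LOG_BITMAP_STATE_CANCELING,  /* cancel requested */
};

typedef struct BitmapLogState {
    int state;                   /* simplified; the real series likely guards this atomically */
} BitmapLogState;

/* Stand-in for the series' helper: move to new_state only if the state
 * still holds the value the caller observed (presumably a compare-and-swap
 * in the real code). */
static void logging_state_set_status(BitmapLogState *b, int old_state,
                                     int new_state)
{
    if (b->state == old_state) {
        b->state = new_state;
    }
}

/* Same loop as logging_bitmap_cancel() in the patch: retry until the dump
 * is either not running at all or has been marked CANCELING. */
static void logging_bitmap_cancel(BitmapLogState *b)
{
    int old_state;
    do {
        old_state = b->state;
        if (old_state != LOG_BITMAP_STATE_SETUP &&
            old_state != LOG_BITMAP_STATE_ACTIVE) {
            break;
        }
        logging_state_set_status(b, old_state, LOG_BITMAP_STATE_CANCELING);
    } while (b->state != LOG_BITMAP_STATE_CANCELING);
}

int main(void)
{
    BitmapLogState b = { .state = LOG_BITMAP_STATE_ACTIVE };

    logging_bitmap_cancel(&b);   /* what the QMP/HMP command triggers */

    /* The dump loop is expected to poll the state between epochs and stop
     * writing the bitmap once it sees CANCELING. */
    if (b.state == LOG_BITMAP_STATE_CANCELING) {
        printf("dump loop would stop and clean up here\n");
    }
    return 0;
}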