[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC PATCH 6/8] migration/dirtyrate: Implement get_sample_gap_period() and block_sample_gap_period()
From: |
Chuan Zheng |
Subject: |
[RFC PATCH 6/8] migration/dirtyrate: Implement get_sample_gap_period() and block_sample_gap_period() |
Date: |
Sat, 25 Jul 2020 11:11:07 +0800 |
From: Zheng Chuan <zhengchuan@huawei.com>
Implement get_sample_gap_period() and block_sample_gap_period() to
sleep specific time between sample actions.
Signed-off-by: Zheng Chuan <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhang <ann.zhuangyanying@huawei.com>
---
migration/dirtyrate.c | 28 ++++++++++++++++++++++++++++
migration/dirtyrate.h | 6 +++++-
2 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 7badc53..00abfa7 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -295,10 +295,38 @@ static void set_dirty_rate_stage(CalculatingDirtyRateStage ratestage)
calculating_dirty_rate_stage = ratestage;
}
+static int64_t block_sample_gap_period(int64_t msec, int64_t initial_time)
+{
+ int64_t current_time;
+
+ current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ if ((current_time - initial_time) >= msec) {
+ msec = current_time - initial_time;
+ } else {
+ g_usleep((msec + initial_time - current_time) * 1000);
+ }
+
+ return msec;
+}
+
+static int64_t get_sample_gap_period(struct dirtyrate_config config)
+{
+ int64_t msec;
+
+ msec = config.sample_period_seconds * 1000;
+ if (msec <= MIN_FETCH_DIRTYRATE_TIME_MSEC || msec >
MAX_FETCH_DIRTYRATE_TIME_MSEC) {
+ msec = DEFAULT_FETCH_DIRTYRATE_TIME_MSEC;
+ }
+ return msec;
+}
+
void *get_dirtyrate_thread(void *arg)
{
struct dirtyrate_config config = *(struct dirtyrate_config *)arg;
int64_t msec = 0;
+
+ /* max period is 60 seconds */
+ msec = get_sample_gap_period(config);
set_dirty_rate_stage(CAL_DIRTY_RATE_ING);
diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
index 4d9b3b8..5aef2d7 100644
--- a/migration/dirtyrate.h
+++ b/migration/dirtyrate.h
@@ -14,12 +14,16 @@
#define QEMU_MIGRATION_DIRTYRATE_H
/* take 256 pages per GB for cal dirty rate */
-#define DIRTYRATE_DEFAULT_SAMPLE_PAGES 256
+#define DIRTYRATE_DEFAULT_SAMPLE_PAGES 256
#define DIRTYRATE_SAMPLE_PAGE_SIZE 4096
#define DIRTYRATE_PAGE_SIZE_SHIFT 12
#define BLOCK_INFO_MAX_LEN 256
#define PAGE_SIZE_SHIFT 20
+#define MIN_FETCH_DIRTYRATE_TIME_MSEC 0
+#define MAX_FETCH_DIRTYRATE_TIME_MSEC 60000
+#define DEFAULT_FETCH_DIRTYRATE_TIME_MSEC 1000
+
struct dirtyrate_config {
uint64_t sample_pages_per_gigabytes;
int64_t sample_period_seconds;
--
1.8.3.1
- [RFC PATCH 0/8] *** A Method for evaluating dirty page rate ***, Chuan Zheng, 2020/07/24
- [RFC PATCH 3/8] migration/dirtyrate: Add dirtyrate statistics series functions, Chuan Zheng, 2020/07/24
- [RFC PATCH 4/8] migration/dirtyrate: Record hash results for each ramblock, Chuan Zheng, 2020/07/24
- [RFC PATCH 1/8] migration/dirtyrate: Add get_dirtyrate_thread() function, Chuan Zheng, 2020/07/24
- [RFC PATCH 7/8] migration/dirtyrate: Implement calculate_dirtyrate() function, Chuan Zheng, 2020/07/24
- [RFC PATCH 2/8] migration/dirtyrate: Add block_dirty_info to store dirtypage info, Chuan Zheng, 2020/07/24
- [RFC PATCH 6/8] migration/dirtyrate: Implement get_sample_gap_period() and block_sample_gap_period(), Chuan Zheng <=
- [RFC PATCH 5/8] migration/dirtyrate: Compare hash results for recorded ramblock, Chuan Zheng, 2020/07/24
- [RFC PATCH 8/8] migration/dirtyrate: Implement qmp_cal_dirty_rate()/qmp_get_dirty_rate() function, Chuan Zheng, 2020/07/24