[PULL 03/11] Use new created qemu_target_pages_to_MiB()

From: Juan Quintela
Subject: [PULL 03/11] Use new created qemu_target_pages_to_MiB()
Date: Mon, 15 May 2023 14:33:26 +0200

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230511141208.17779-3-quintela@redhat.com>
---
 migration/dirtyrate.c | 11 +++++------
 softmmu/dirtylimit.c  | 11 +++--------
 2 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 5bac984fa5..ae52c42c4c 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -16,6 +16,7 @@
 #include "qapi/error.h"
 #include "cpu.h"
 #include "exec/ramblock.h"
+#include "exec/target_page.h"
 #include "exec/ram_addr.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/main-loop.h"
@@ -75,13 +76,11 @@ static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                       int64_t calc_time_ms)
 {
-    uint64_t memory_size_MB;
     uint64_t increased_dirty_pages =
         dirty_pages.end_pages - dirty_pages.start_pages;
+    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
 
-    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
-
-    return memory_size_MB * 1000 / calc_time_ms;
+    return memory_size_MiB * 1000 / calc_time_ms;
 }
 
 void global_dirty_log_change(unsigned int flag, bool start)
@@ -292,8 +291,8 @@ static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
     DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
     DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
     /* size of total pages in MB */
-    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
-                                                   TARGET_PAGE_SIZE) >> 20;
+    DirtyStat.page_sampling.total_block_mem_MB +=
+        qemu_target_pages_to_MiB(info->ramblock_pages);
 }
 
 static void update_dirtyrate(uint64_t msec)
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 71bf6dc7a4..015a9038d1 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -235,20 +235,15 @@ bool dirtylimit_vcpu_index_valid(int cpu_index)
 static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate)
 {
     static uint64_t max_dirtyrate;
-    unsigned target_page_bits = qemu_target_page_bits();
-    uint64_t dirty_ring_size_MB;
+    uint64_t dirty_ring_size_MiB;
 
-    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
-    assert(target_page_bits < 20);
-
-    /* Convert ring size (pages) to MiB (2**20). */
-    dirty_ring_size_MB = kvm_dirty_ring_size() >> (20 - target_page_bits);
+    dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size());
 
     if (max_dirtyrate < dirtyrate) {
         max_dirtyrate = dirtyrate;
     }
 
-    return dirty_ring_size_MB * 1000000 / max_dirtyrate;
+    return dirty_ring_size_MiB * 1000000 / max_dirtyrate;
 }
 
 static inline bool dirtylimit_done(uint64_t quota,
--
2.40.1
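
The helper used above is introduced earlier in this series ([PULL 02/11] "softmmu: Create qemu_target_pages_to_MiB()"). As a rough sketch of the conversion it centralizes, assuming a power-of-two target page size smaller than 1 MiB (the same assumption the removed assert() encoded), the open-coded expressions above reduce to a single shift. The function name and signature below are illustrative only, not the actual QEMU API:

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative sketch, not the QEMU implementation: convert a number of
 * target pages into MiB.  It mirrors the expressions removed by this patch,
 * "(pages * TARGET_PAGE_SIZE) >> 20" and "pages >> (20 - target_page_bits)".
 */
static uint64_t target_pages_to_MiB_sketch(uint64_t pages, unsigned page_bits)
{
    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
    assert(page_bits < 20);

    /* pages * 2^page_bits bytes, divided by 2^20 bytes per MiB. */
    return pages >> (20 - page_bits);
}

Folding the shift and the page-size assertion into one helper means callers such as dirtyrate.c and dirtylimit.c no longer repeat the page-size assumption at each call site, which is what lets this patch drop the local assert() and comment.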