From: Peter Xu
Subject: [PATCH v6 05/10] KVM: Provide helper to sync dirty bitmap from slot to ramblock
Date: Wed, 24 Mar 2021 14:39:49 -0400
kvm_physical_sync_dirty_bitmap() calculates the ramblock offset in an
awkward way from the MemoryRegionSection passed in by the caller. In
fact, for each KVMSlot the ramblock offset never changes during its
lifetime, so cache the ramblock offset in the KVMSlot structure when
the slot is created.
With that, kvm_physical_sync_dirty_bitmap() can be further simplified
with a helper that syncs a specific KVMSlot's dirty bitmap into the
corresponding ramblock dirty bitmap.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
accel/kvm/kvm-all.c | 37 +++++++++++++++++--------------------
include/sysemu/kvm_int.h | 2 ++
2 files changed, 19 insertions(+), 20 deletions(-)
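
For reference, a minimal standalone sketch of the offset arithmetic that the
patch computes once and caches at slot-creation time; the struct definitions
and example values below are simplified stand-ins for QEMU's
MemoryRegionSection and KVMSlot, not the real types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;
typedef uint64_t ram_addr_t;

/* Simplified stand-in for the MemoryRegionSection fields used by the patch. */
struct section {
    ram_addr_t mr_ram_addr;              /* memory_region_get_ram_addr(mr) */
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
};

/* Simplified stand-in for the KVMSlot fields touched by the patch. */
struct slot {
    hwaddr start_addr;                   /* guest-physical start of the slot */
    ram_addr_t ram_start_offset;         /* cached offset in ram address space */
};

int main(void)
{
    struct section sec = {
        .mr_ram_addr = 0x100000,
        .offset_within_region = 0x2000,
        .offset_within_address_space = 0x40000000,
    };
    struct slot s = { .start_addr = 0x40003000 };

    /* Computed once in kvm_set_phys_mem() and cached in the slot: */
    hwaddr mr_offset = sec.offset_within_region +
                       s.start_addr - sec.offset_within_address_space;
    s.ram_start_offset = sec.mr_ram_addr + mr_offset;

    /* The sync helper can then start from the cached value directly,
     * instead of re-deriving it from a MemoryRegionSection each time. */
    printf("ram_start_offset = 0x%" PRIx64 "\n", (uint64_t)s.ram_start_offset);
    return 0;
}

With the value cached, kvm_slot_sync_dirty_pages() only needs
slot->ram_start_offset and slot->memory_size, as shown in the diff below.
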
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 27460587a03..57a43e6a6b2 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -575,15 +575,12 @@ static void kvm_log_stop(MemoryListener *listener,
}
/* get kvm's dirty pages bitmap and update qemu's */
-static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
- unsigned long *bitmap)
+static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
- ram_addr_t start = section->offset_within_region +
- memory_region_get_ram_addr(section->mr);
- ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;
+ ram_addr_t start = slot->ram_start_offset;
+ ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
- cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
- return 0;
+ cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
@@ -658,26 +655,19 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
KVMState *s = kvm_state;
KVMSlot *mem;
hwaddr start_addr, size;
- hwaddr slot_size, slot_offset = 0;
+ hwaddr slot_size;
size = kvm_align_section(section, &start_addr);
while (size) {
- MemoryRegionSection subsection = *section;
-
slot_size = MIN(kvm_max_slot_size, size);
mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
if (!mem) {
/* We don't have a slot if we want to trap every access. */
return;
}
-
if (kvm_slot_get_dirty_log(s, mem)) {
- subsection.offset_within_region += slot_offset;
- subsection.size = int128_make64(slot_size);
- kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
+ kvm_slot_sync_dirty_pages(mem);
}
-
- slot_offset += slot_size;
start_addr += slot_size;
size -= slot_size;
}
@@ -1136,7 +1126,8 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
int err;
MemoryRegion *mr = section->mr;
bool writeable = !mr->readonly && !mr->rom_device;
- hwaddr start_addr, size, slot_size;
+ hwaddr start_addr, size, slot_size, mr_offset;
+ ram_addr_t ram_start_offset;
void *ram;
if (!memory_region_is_ram(mr)) {
@@ -1154,9 +1145,13 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
return;
}
- /* use aligned delta to align the ram address */
- ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
- (start_addr - section->offset_within_address_space);
+ /* The offset of the kvmslot within the memory region */
+ mr_offset = section->offset_within_region + start_addr -
+ section->offset_within_address_space;
+
+ /* use aligned delta to align the ram address and offset */
+ ram = memory_region_get_ram_ptr(mr) + mr_offset;
+ ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
kvm_slots_lock();
@@ -1195,6 +1190,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
mem->as_id = kml->as_id;
mem->memory_size = slot_size;
mem->start_addr = start_addr;
+ mem->ram_start_offset = ram_start_offset;
mem->ram = ram;
mem->flags = kvm_mem_flags(mr);
kvm_slot_init_dirty_bitmap(mem);
@@ -1205,6 +1201,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
abort();
}
start_addr += slot_size;
+ ram_start_offset += slot_size;
ram += slot_size;
size -= slot_size;
} while (size);
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index e13075f738a..ab09a150e19 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -25,6 +25,8 @@ typedef struct KVMSlot
unsigned long *dirty_bmap;
/* Cache of the address space ID */
int as_id;
+ /* Cache of the offset in ram address space */
+ ram_addr_t ram_start_offset;
} KVMSlot;
typedef struct KVMMemoryListener {
--
2.26.2