[PULL 09/10] vfio: Support host translation granule size
From: Alex Williamson
Subject: [PULL 09/10] vfio: Support host translation granule size
Date: Tue, 16 Mar 2021 11:07:39 -0600
User-agent: StGit/0.23
From: Kunkun Jiang <jiangkunkun@huawei.com>
cpu_physical_memory_set_dirty_lebitmap() can quickly process the dirty
pages of memory by bitmap-traveling, regardless of whether the bitmap
is correctly aligned.
cpu_physical_memory_set_dirty_lebitmap() supports bitmaps whose granule
is the host page size, so it is better to set bitmap_pgsize to the host
page size in order to support more translation granule sizes.
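As a rough illustration of the sizing arithmetic above, the sketch
below computes the dirty-bitmap size for an unmap range at host-page
granularity. The simplified ROUND_UP/PAGE_ALIGN macros and the 64 KiB
page size are assumptions for this example, not QEMU's definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for QEMU's macros, for illustration only. */
    #define BITS_PER_BYTE       8
    #define ROUND_UP(n, d)      ((((n) + (d) - 1) / (d)) * (d))
    #define PAGE_ALIGN(sz, pg)  ROUND_UP((sz), (pg))

    int main(void)
    {
        uint64_t size = 1 << 20;              /* 1 MiB DMA unmap range */
        uint64_t host_page_size = 64 * 1024;  /* e.g. a 64K-page Arm host */

        /* One bitmap bit per host page, as in the patched code. */
        uint64_t pages = PAGE_ALIGN(size, host_page_size) / host_page_size;

        /* Round the bit count up to whole 64-bit words, then convert to
         * bytes (mirrors ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE)
         * in the patch, with uint64_t standing in for __u64). */
        uint64_t bitmap_bytes =
            ROUND_UP(pages, sizeof(uint64_t) * BITS_PER_BYTE) / BITS_PER_BYTE;

        /* Prints: 16 pages -> 8 bitmap bytes */
        printf("%" PRIu64 " pages -> %" PRIu64 " bitmap bytes\n",
               pages, bitmap_bytes);
        return 0;
    }

Tracking at host granularity also keeps the bitmap small on large-page
hosts: the same 1 MiB range needs 256 bits (32 bytes) at a 4 KiB
granule, but only 64 bits (8 bytes) at 64 KiB.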
[aw: The Fixes commit below introduced code to restrict migration
support to configurations where the target page size intersects the
host dirty page support. For example, a 4K guest on a 4K host.
Due to the above flexibility in bitmap handling, this restriction
unnecessarily prevents mixed target/host page sizes that could
otherwise be supported. Use the host page size for the dirty bitmap.]
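To make the fixed restriction concrete, here is a minimal sketch of
the old and new capability checks. The pgsize_bitmap value is a
hypothetical report from a type1 IOMMU driver on a 64K-page host, not
taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical setup: a 64K-page host whose IOMMU driver
         * reports dirty-tracking support at 64K granule only. */
        uint64_t pgsize_bitmap = 0x10000;    /* driver-reported granules */
        uint64_t target_page_size = 0x1000;  /* 4K guest */
        uint64_t host_page_size = 0x10000;   /* 64K host */

        /* Old check: tests the guest's page size against the mask. */
        bool old_ok = pgsize_bitmap & target_page_size;

        /* New check: tests the host page size, matching how the dirty
         * bitmap is actually sized and walked. */
        bool new_ok = pgsize_bitmap & host_page_size;

        printf("old: %s, new: %s\n",
               old_ok ? "supported" : "unsupported",
               new_ok ? "supported" : "unsupported");
        return 0;
    }

The old test can never pass in this setup, since 0x1000 is not among
the driver's granules, even though the host can track dirty pages
perfectly well at 64K; the new test admits it.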
Fixes: 87ea529c502 ("vfio: Get migration capability flags for container")
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Message-Id: <20210304133446.1521-1-jiangkunkun@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
hw/vfio/common.c | 48 +++++++++++++++++++++++++-----------------------
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 35a41fd05251..ad08dfd729b9 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -378,7 +378,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
 {
     struct vfio_iommu_type1_dma_unmap *unmap;
     struct vfio_bitmap *bitmap;
-    uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
+    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
     int ret;
 
     unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
@@ -390,12 +390,12 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
     bitmap = (struct vfio_bitmap *)&unmap->data;
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to
-     * TARGET_PAGE_SIZE.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
+     * to qemu_real_host_page_size.
      */
 
-    bitmap->pgsize = TARGET_PAGE_SIZE;
+    bitmap->pgsize = qemu_real_host_page_size;
     bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                    BITS_PER_BYTE;
 
@@ -674,16 +674,17 @@ static void vfio_listener_region_add(MemoryListener *listener,
         return;
     }
 
-    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
-                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+    if (unlikely((section->offset_within_address_space &
+                  ~qemu_real_host_page_mask) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
 
-    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
 
     if (int128_ge(int128_make64(iova), llend)) {
         return;
@@ -892,8 +893,9 @@ static void vfio_listener_region_del(MemoryListener *listener,
         return;
     }
 
-    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
-                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+    if (unlikely((section->offset_within_address_space &
+                  ~qemu_real_host_page_mask) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
@@ -921,10 +923,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
          */
     }
 
-    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
 
     if (int128_ge(int128_make64(iova), llend)) {
         return;
@@ -1004,13 +1006,13 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
     range->size = size;
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to
-     * TARGET_PAGE_SIZE.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
+     * to qemu_real_host_page_size.
      */
-    range->bitmap.pgsize = TARGET_PAGE_SIZE;
+    range->bitmap.pgsize = qemu_real_host_page_size;
 
-    pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS;
+    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
     range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                          BITS_PER_BYTE;
     range->bitmap.data = g_try_malloc0(range->bitmap.size);
@@ -1114,8 +1116,8 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                section->offset_within_region;
 
     return vfio_get_dirty_bitmap(container,
-                   TARGET_PAGE_ALIGN(section->offset_within_address_space),
-                   int128_get64(section->size), ram_addr);
+                   REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
+                   int128_get64(section->size), ram_addr);
 }
 
 static void vfio_listener_log_sync(MemoryListener *listener,
@@ -1655,10 +1657,10 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
                            header);
 
     /*
-     * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of
-     * TARGET_PAGE_SIZE to mark those dirty.
+     * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
+     * qemu_real_host_page_size to mark those dirty.
      */
-    if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
+    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
         container->dirty_pages_supported = true;
         container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
         container->dirty_pgsizes = cap_mig->pgsize_bitmap;