[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC v3 4/8] pc: fix QEMU crashing when more than ~50 memor
From: |
Igor Mammedov |
Subject: |
[Qemu-devel] [RFC v3 4/8] pc: fix QEMU crashing when more than ~50 memory hotplugged |
Date: |
Wed, 8 Jul 2015 11:46:44 +0200 |
QEMU asserts in vhost due to hitting the vhost backend's limit
on the number of supported memory regions.
Describe all hotplugged memory as one contiguous range
to vhost, with a linear 1:1 HVA->GPA mapping in the backend.
Signed-off-by: Igor Mammedov <address@hidden>
---
hw/virtio/vhost.c | 47 ++++++++++++++++++++++++++++++++++++++++++++---
include/hw/virtio/vhost.h | 1 +
2 files changed, 45 insertions(+), 3 deletions(-)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 2712c6f..7bc27f0 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -432,6 +432,10 @@ static void vhost_set_memory(MemoryListener *listener,
assert(size);
+ if (!dev->rsvd_hva.mr) {
+ dev->rsvd_hva = memory_region_find_hva_range(section->mr);
+ }
+
/* Optimize no-change case. At least cirrus_vga does this a lot at this
time. */
ram = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region;
if (add) {
@@ -472,6 +476,42 @@ static void vhost_begin(MemoryListener *listener)
dev->mem_changed_start_addr = -1;
}
+/*
+ * Push the device's memory map to the vhost backend, collapsing all
+ * regions that fall inside the reserved contiguous HVA range into a
+ * single table entry so the backend's region-count limit is not hit.
+ * Returns the backend's result (>= 0 on success).
+ */
+static int vhost_set_mem_table(struct vhost_dev *dev)
+{
+    hwaddr start_addr = 0;
+    ram_addr_t size = 0;
+    struct vhost_memory *mem;
+    int r, i;
+
+    /* Work on a copy: dev->mem must keep the full per-region view. */
+    mem = g_memdup(dev->mem, offsetof(struct vhost_memory, regions) +
+                   dev->mem->nregions * sizeof dev->mem->regions[0]);
+    start_addr = dev->rsvd_hva.offset_within_address_space;
+    size = int128_get64(dev->rsvd_hva.size);
+
+    /* Drop individual ranges that lie inside the contiguous HVA area.
+     * NOTE(review): only the range's start address is tested; assumes
+     * no region straddles the reserved area's boundary -- confirm. */
+    for (i = 0; i < mem->nregions; i++) {
+        if (mem->regions[i].guest_phys_addr >= start_addr &&
+            mem->regions[i].guest_phys_addr < start_addr + size) {
+            mem->nregions--;
+            memmove(&mem->regions[i], &mem->regions[i + 1],
+                    (mem->nregions - i) * sizeof mem->regions[0]);
+            i--; /* re-examine the entry just shifted into slot i */
+        }
+    }
+    /* Add one contiguous HVA entry if any range from it was dropped */
+    if (dev->mem->nregions > mem->nregions) {
+        struct vhost_memory_region *reg = &mem->regions[mem->nregions];
+
+        reg->guest_phys_addr = start_addr;
+        reg->memory_size = size;
+        /* cast via uintptr_t: a direct pointer->__u64 cast is not
+         * portable to 32-bit hosts */
+        reg->userspace_addr =
+            (uint64_t)(uintptr_t)memory_region_get_ram_ptr(dev->rsvd_hva.mr);
+        mem->nregions++;
+    }
+
+    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, mem);
+    g_free(mem);
+    return r;
+}
+
static void vhost_commit(MemoryListener *listener)
{
struct vhost_dev *dev = container_of(listener, struct vhost_dev,
@@ -500,7 +540,7 @@ static void vhost_commit(MemoryListener *listener)
}
if (!dev->log_enabled) {
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
+ r = vhost_set_mem_table(dev);
assert(r >= 0);
dev->memory_changed = false;
return;
@@ -513,7 +553,7 @@ static void vhost_commit(MemoryListener *listener)
if (dev->log_size < log_size) {
vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
}
- r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
+ r = vhost_set_mem_table(dev);
assert(r >= 0);
/* To log less, can only decrease log size after table update. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
@@ -956,6 +996,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
migrate_add_blocker(hdev->migration_blocker);
}
hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
+ memset(&hdev->rsvd_hva, 0, sizeof hdev->rsvd_hva);
hdev->n_mem_sections = 0;
hdev->mem_sections = NULL;
hdev->log = NULL;
@@ -1119,7 +1160,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice
*vdev)
if (r < 0) {
goto fail_features;
}
- r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
+ r = vhost_set_mem_table(hdev);
if (r < 0) {
r = -errno;
goto fail_mem;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index dd51050..d41bf2f 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -40,6 +40,7 @@ struct vhost_dev {
struct vhost_memory *mem;
int n_mem_sections;
MemoryRegionSection *mem_sections;
+ MemoryRegionSection rsvd_hva;
struct vhost_virtqueue *vqs;
int nvqs;
/* the first virtqueue which would be used by this vhost dev */
--
1.8.3.1
- [Qemu-devel] [RFC v3 0/8] Fix QEMU crash during memory hotplug with vhost=on, Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 1/8] memory: get rid of memory_region_destructor_ram_from_ptr(), Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 2/8] memory: introduce MemoryRegion container with reserved HVA range, Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 3/8] pc: reserve hotpluggable memory range with memory_region_init_hva_range(), Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 5/8] exec: make sure that RAMBlock descriptor won't be leaked, Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 6/8] exec: add qemu_ram_unmap_hva() API for unmapping memory from HVA area, Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 4/8] pc: fix QEMU crashing when more than ~50 memory hotplugged,
Igor Mammedov <=
- [Qemu-devel] [RFC v3 8/8] memory: add support for deleting HVA mapped MemoryRegion, Igor Mammedov, 2015/07/08
- [Qemu-devel] [RFC v3 7/8] memory: extend memory_region_add_subregion() to support error reporting, Igor Mammedov, 2015/07/08