From: Peter Xu
Subject: [PATCH v6 02/10] KVM: Use a big lock to replace per-kml slots_lock
Date: Wed, 24 Mar 2021 14:39:46 -0400
The per-KML slots_lock becomes troublesome when we need to take the slots_lock
of every KML, especially in a context where some of the KML slots_locks may
already be held: we would then have to track which locks we already hold and
which ones still need to be taken.
Make this simple by merging all the KML slots_locks into a single global slots
lock.
The per-KML slots_lock was not particularly helpful anyway: so far only x86 has
two address spaces (hence two slots_locks).  All other architectures always
have a single address space, and therefore effectively a single slots_lock, so
for them nothing changes.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
accel/kvm/kvm-all.c | 33 ++++++++++++++++++---------------
include/sysemu/kvm_int.h | 2 --
2 files changed, 18 insertions(+), 17 deletions(-)
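For context, the new locking pattern boils down to the sketch below.  It is not
part of the patch: kvm_count_used_slots() is a made-up helper, used purely to
show that a caller walking the slots of several KMLs now takes the one global
lock instead of juggling per-KML locks.  It assumes the KVMSlot and
KVMMemoryListener layout from include/sysemu/kvm_int.h and the lock macros
introduced below.

/*
 * Illustration only, not part of the patch.
 */
static int kvm_count_used_slots(KVMMemoryListener **kmls, int nr_kmls,
                                int nr_slots)
{
    int i, j, count = 0;

    /*
     * A single lock now protects the slots of every listener, so a caller
     * that walks more than one KML no longer has to track which per-KML
     * locks it already holds.
     */
    kvm_slots_lock();
    for (i = 0; i < nr_kmls; i++) {
        for (j = 0; j < nr_slots; j++) {
            /* A slot with a non-zero size is in use. */
            if (kmls[i]->slots[j].memory_size) {
                count++;
            }
        }
    }
    kvm_slots_unlock();

    return count;
}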
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index b6d9f92f151..766a7ad8c72 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -174,8 +174,10 @@ typedef struct KVMResampleFd KVMResampleFd;
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
-#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
-#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
+static QemuMutex kml_slots_lock;
+
+#define kvm_slots_lock() qemu_mutex_lock(&kml_slots_lock)
+#define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
static inline void kvm_resample_fd_remove(int gsi)
{
@@ -241,9 +243,9 @@ bool kvm_has_free_slot(MachineState *ms)
bool result;
KVMMemoryListener *kml = &s->memory_listener;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
result = !!kvm_get_free_slot(kml);
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return result;
}
@@ -309,7 +311,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
KVMMemoryListener *kml = &s->memory_listener;
int i, ret = 0;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &kml->slots[i];
@@ -319,7 +321,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
break;
}
}
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -515,7 +517,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
return 0;
}
- kvm_slots_lock(kml);
+ kvm_slots_lock();
while (size && !ret) {
slot_size = MIN(kvm_max_slot_size, size);
@@ -531,7 +533,7 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
}
out:
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -812,7 +814,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
return ret;
}
- kvm_slots_lock(kml);
+ kvm_slots_lock();
for (i = 0; i < s->nr_slots; i++) {
mem = &kml->slots[i];
@@ -838,7 +840,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
}
}
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
return ret;
}
@@ -1143,7 +1145,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
(start_addr - section->offset_within_address_space);
- kvm_slots_lock(kml);
+ kvm_slots_lock();
if (!add) {
do {
@@ -1201,7 +1203,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
} while (size);
out:
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
}
static void kvm_region_add(MemoryListener *listener,
@@ -1228,9 +1230,9 @@ static void kvm_log_sync(MemoryListener *listener,
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
- kvm_slots_lock(kml);
+ kvm_slots_lock();
r = kvm_physical_sync_dirty_bitmap(kml, section);
- kvm_slots_unlock(kml);
+ kvm_slots_unlock();
if (r < 0) {
abort();
}
@@ -1330,7 +1332,6 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
int i;
- qemu_mutex_init(&kml->slots_lock);
kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
kml->as_id = as_id;
@@ -2003,6 +2004,8 @@ static int kvm_init(MachineState *ms)
int type = 0;
uint64_t dirty_log_manual_caps;
+ qemu_mutex_init(&kml_slots_lock);
+
s = KVM_STATE(ms->accelerator);
/*
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index ccb8869f01b..1da30e18841 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -27,8 +27,6 @@ typedef struct KVMSlot
typedef struct KVMMemoryListener {
MemoryListener listener;
- /* Protects the slots and all inside them */
- QemuMutex slots_lock;
KVMSlot *slots;
int as_id;
} KVMMemoryListener;
--
2.26.2