[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 1/2] cpu: expose qemu_cpu_list_lock for lock-guard use
From: Jamie Iles
Subject: [PATCH v3 1/2] cpu: expose qemu_cpu_list_lock for lock-guard use
Date: Thu, 27 Apr 2023 03:09:24 +0100
Expose qemu_cpu_list_lock globally so that we can use
WITH_QEMU_LOCK_GUARD and QEMU_LOCK_GUARD to simplify a few code paths
now and in future.
Signed-off-by: Jamie Iles <quic_jiles@quicinc.com>
---
cpus-common.c | 2 +-
include/exec/cpu-common.h | 1 +
linux-user/elfload.c | 12 ++++++------
migration/dirtyrate.c | 26 +++++++++++++-------------
trace/control-target.c | 9 ++++-----
5 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/cpus-common.c b/cpus-common.c
index b0047e456f93..82d439add5c1 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -25,7 +25,7 @@
#include "qemu/lockable.h"
#include "trace/trace-root.h"
-static QemuMutex qemu_cpu_list_lock;
+QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 6feaa40ca7b0..0c833d6ac9c6 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -32,6 +32,7 @@ extern intptr_t qemu_host_page_mask;
#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
+extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 1dbc1f0f9baa..3ff16b163382 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -4238,14 +4238,14 @@ static int fill_note_info(struct elf_note_info *info,
info->notes_size += note_size(&info->notes[i]);
/* read and fill status of all threads */
- cpu_list_lock();
- CPU_FOREACH(cpu) {
- if (cpu == thread_cpu) {
- continue;
+ WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
+ CPU_FOREACH(cpu) {
+ if (cpu == thread_cpu) {
+ continue;
+ }
+ fill_thread_info(info, cpu->env_ptr);
}
- fill_thread_info(info, cpu->env_ptr);
}
- cpu_list_unlock();
return (0);
}
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 180ba38c7a80..388337a33249 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -150,25 +150,25 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
retry:
init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- cpu_list_lock();
- gen_id = cpu_list_generation_id_get();
- records = vcpu_dirty_stat_alloc(stat);
- vcpu_dirty_stat_collect(stat, records, true);
- cpu_list_unlock();
+ WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
+ gen_id = cpu_list_generation_id_get();
+ records = vcpu_dirty_stat_alloc(stat);
+ vcpu_dirty_stat_collect(stat, records, true);
+ }
duration = dirty_stat_wait(calc_time_ms, init_time_ms);
global_dirty_log_sync(flag, one_shot);
- cpu_list_lock();
- if (gen_id != cpu_list_generation_id_get()) {
- g_free(records);
- g_free(stat->rates);
- cpu_list_unlock();
- goto retry;
+ WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
+ if (gen_id != cpu_list_generation_id_get()) {
+ g_free(records);
+ g_free(stat->rates);
+ goto retry;
+ }
+ vcpu_dirty_stat_collect(stat, records, false);
}
- vcpu_dirty_stat_collect(stat, records, false);
- cpu_list_unlock();
for (i = 0; i < stat->nvcpu; i++) {
dirtyrate = do_calculate_dirtyrate(records[i], duration);
diff --git a/trace/control-target.c b/trace/control-target.c
index 232c97a4a183..c0c1e2310a51 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/lockable.h"
#include "cpu.h"
#include "trace/trace-root.h"
#include "trace/control.h"
@@ -116,11 +117,9 @@ static bool adding_first_cpu1(void)
static bool adding_first_cpu(void)
{
- bool res;
- cpu_list_lock();
- res = adding_first_cpu1();
- cpu_list_unlock();
- return res;
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+
+ return adding_first_cpu1();
}
void trace_init_vcpu(CPUState *vcpu)
--
2.25.1