[RFC PATCH] main-loop: introduce WITH_QEMU_IOTHREAD_LOCK
From: Alex Bennée
Subject: [RFC PATCH] main-loop: introduce WITH_QEMU_IOTHREAD_LOCK
Date: Mon, 24 Oct 2022 18:19:09 +0100
This helper intends to ape our other auto-unlocking helpers such as
WITH_QEMU_LOCK_GUARD. The principal difference is that the iothread
lock is often nested, so we need a little extra bookkeeping to ensure
we don't double lock or unlock a lock taken higher up the call chain.
Convert some of the common routines that follow this pattern to use
the new wrapper.
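For example (an illustrative sketch, not part of this patch; do_work()
and its use of cpu_interrupt() are made up for demonstration), a
function can take the guard without caring whether its caller already
holds the BQL:

    static void do_work(CPUState *cpu)
    {
        WITH_QEMU_IOTHREAD_LOCK {
            /* BQL held here; the guard takes it only if not already held */
            cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
        }
        /* the guard releases the BQL only if it was the one to take it */
    }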
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
include/qemu/main-loop.h | 41 ++++++++++++++++++++++++++++++++++++++++
hw/core/cpu-common.c | 10 ++--------
util/rcu.c | 40 ++++++++++++++++-----------------------
ui/cocoa.m | 18 ++++--------------
4 files changed, 63 insertions(+), 46 deletions(-)
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index aac707d073..604e1823da 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -341,6 +341,47 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line);
*/
void qemu_mutex_unlock_iothread(void);
+/**
+ * WITH_QEMU_IOTHREAD_LOCK - nested lock of iothread
+ *
+ * This is a specialised form of WITH_QEMU_LOCK_GUARD which is used to
+ * safely encapsulate code that needs the BQL. The main difference is
+ * the BQL is often nested so we need to save the state of it on entry
+ * so we know if we need to free it once we leave the scope of the guard.
+ */
+
+typedef struct {
+ bool taken;
+} IoThreadLocked;
+
+static inline IoThreadLocked *qemu_iothread_auto_lock(IoThreadLocked *x)
+{
+ bool locked = qemu_mutex_iothread_locked();
+ if (!locked) {
+ qemu_mutex_lock_iothread();
+ x->taken = true;
+ }
+ return x;
+}
+
+static inline void qemu_iothread_auto_unlock(IoThreadLocked *x)
+{
+ if (x->taken) {
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(IoThreadLocked, qemu_iothread_auto_unlock)
+
+#define WITH_QEMU_IOTHREAD_LOCK_(var) \
+ for (g_autoptr(IoThreadLocked) var = \
+ qemu_iothread_auto_lock(&(IoThreadLocked) {}); \
+ var; \
+ qemu_iothread_auto_unlock(var), var = NULL)
+
+#define WITH_QEMU_IOTHREAD_LOCK \
+ WITH_QEMU_IOTHREAD_LOCK_(glue(qemu_lockable_auto, __COUNTER__))
+
/*
* qemu_cond_wait_iothread: Wait on condition for the main loop mutex
*
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index f9fdd46b9d..0a60f916a9 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -70,14 +70,8 @@ CPUState *cpu_create(const char *typename)
* BQL here if we need to. cpu_interrupt assumes it is held.*/
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
- bool need_lock = !qemu_mutex_iothread_locked();
-
- if (need_lock) {
- qemu_mutex_lock_iothread();
- }
- cpu->interrupt_request &= ~mask;
- if (need_lock) {
- qemu_mutex_unlock_iothread();
+ WITH_QEMU_IOTHREAD_LOCK {
+ cpu->interrupt_request &= ~mask;
}
}
diff --git a/util/rcu.c b/util/rcu.c
index b6d6c71cff..02e7491de1 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -320,35 +320,27 @@ static void drain_rcu_callback(struct rcu_head *node)
void drain_call_rcu(void)
{
struct rcu_drain rcu_drain;
- bool locked = qemu_mutex_iothread_locked();
memset(&rcu_drain, 0, sizeof(struct rcu_drain));
qemu_event_init(&rcu_drain.drain_complete_event, false);
- if (locked) {
- qemu_mutex_unlock_iothread();
- }
-
-
- /*
- * RCU callbacks are invoked in the same order as in which they
- * are registered, thus we can be sure that when 'drain_rcu_callback'
- * is called, all RCU callbacks that were registered on this thread
- * prior to calling this function are completed.
- *
- * Note that since we have only one global queue of the RCU callbacks,
- * we also end up waiting for most of RCU callbacks that were registered
- * on the other threads, but this is a side effect that shoudn't be
- * assumed.
- */
-
- qatomic_inc(&in_drain_call_rcu);
- call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
- qemu_event_wait(&rcu_drain.drain_complete_event);
- qatomic_dec(&in_drain_call_rcu);
+ WITH_QEMU_IOTHREAD_LOCK {
+ /*
+ * RCU callbacks are invoked in the same order as in which they
+ * are registered, thus we can be sure that when 'drain_rcu_callback'
+ * is called, all RCU callbacks that were registered on this thread
+ * prior to calling this function are completed.
+ *
+ * Note that since we have only one global queue of the RCU callbacks,
+ * we also end up waiting for most of RCU callbacks that were registered
+ * on the other threads, but this is a side effect that shoudn't be
+ * assumed.
+ */
- if (locked) {
- qemu_mutex_lock_iothread();
+ qatomic_inc(&in_drain_call_rcu);
+ call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
+ qemu_event_wait(&rcu_drain.drain_complete_event);
+ qatomic_dec(&in_drain_call_rcu);
}
}
diff --git a/ui/cocoa.m b/ui/cocoa.m
index 660d3e0935..f8bd315bdd 100644
--- a/ui/cocoa.m
+++ b/ui/cocoa.m
@@ -115,27 +115,17 @@ static void cocoa_switch(DisplayChangeListener *dcl,
static void with_iothread_lock(CodeBlock block)
{
- bool locked = qemu_mutex_iothread_locked();
- if (!locked) {
- qemu_mutex_lock_iothread();
- }
- block();
- if (!locked) {
- qemu_mutex_unlock_iothread();
+ WITH_QEMU_IOTHREAD_LOCK {
+ block();
}
}
static bool bool_with_iothread_lock(BoolCodeBlock block)
{
- bool locked = qemu_mutex_iothread_locked();
bool val;
- if (!locked) {
- qemu_mutex_lock_iothread();
- }
- val = block();
- if (!locked) {
- qemu_mutex_unlock_iothread();
+ WITH_QEMU_IOTHREAD_LOCK {
+ val = block();
}
return val;
}
--
2.34.1