[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 11/35] convert CoMutex to stackless coroutines
From: Paolo Bonzini
Subject: [PATCH 11/35] convert CoMutex to stackless coroutines
Date: Thu, 10 Mar 2022 13:43:49 +0100
Build the frame for qemu_co_mutex_lock_slowpath, because it has code
that runs after qemu_coroutine_yield(). For qemu_co_mutex_lock() and
qemu_co_mutex_unlock(), just return COROUTINE_CONTINUE on paths that do
not go through an awaitable function, which is all of them in the case
of qemu_co_mutex_unlock().
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
util/qemu-coroutine-lock.c | 60 ++++++++++++++++++++++++++++++--------
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 048cfcea71..061a376aa4 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -120,6 +120,7 @@ bool qemu_co_queue_empty(CoQueue *queue)
{
return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
+#endif
/* The wait records are handled with a multiple-producer, single-consumer
* lock-free queue. There cannot be two concurrent pop_waiter() calls
@@ -197,15 +198,28 @@ static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
aio_co_wake(co);
}
-static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
- CoMutex *mutex)
-{
- Coroutine *self = qemu_coroutine_self();
+struct FRAME__qemu_co_mutex_lock_slowpath {
+ CoroutineFrame common;
+ uint32_t _step;
+ AioContext *ctx;
+ CoMutex *mutex;
+ Coroutine *self;
CoWaitRecord w;
+};
+
+static CoroutineAction co__qemu_co_mutex_lock_slowpath(void *_frame)
+{
+ struct FRAME__qemu_co_mutex_lock_slowpath *_f = _frame;
+ AioContext *ctx = _f->ctx;
+ CoMutex *mutex = _f->mutex;
+ Coroutine *self;
unsigned old_handoff;
+switch(_f->_step) {
+case 0: {
+ self = qemu_coroutine_self();
trace_qemu_co_mutex_lock_entry(mutex, self);
- push_waiter(mutex, &w);
+ push_waiter(mutex, &_f->w);
/* This is the "Responsibility Hand-Off" protocol; a lock() picks from
* a concurrent unlock() the responsibility of waking somebody up.
@@ -221,21 +235,40 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
Coroutine *co = to_wake->co;
if (co == self) {
/* We got the lock ourselves! */
- assert(to_wake == &w);
+ assert(to_wake == &_f->w);
mutex->ctx = ctx;
- return;
+ goto _out;
}
qemu_co_mutex_wake(mutex, co);
}
- qemu_coroutine_yield();
+_f->_step = 1;
+_f->self = self;
+ return qemu_coroutine_yield();
+}
+case 1:
+self = _f->self;
trace_qemu_co_mutex_lock_return(mutex, self);
mutex->holder = self;
self->locks_held++;
+ goto _out;
+}
+_out:
+return stack_free(&_f->common);
}
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
+static CoroutineAction qemu_co_mutex_lock_slowpath(AioContext *ctx, CoMutex *mutex)
+{
+ struct FRAME__qemu_co_mutex_lock_slowpath *f;
+ f = stack_alloc(co__qemu_co_mutex_lock_slowpath, sizeof(*f));
+ f->ctx = ctx;
+ f->mutex = mutex;
+ f->_step = 0;
+ return co__qemu_co_mutex_lock_slowpath(f);
+}
+
+CoroutineAction qemu_co_mutex_lock(CoMutex *mutex)
{
AioContext *ctx = qemu_get_current_aio_context();
Coroutine *self = qemu_coroutine_self();
@@ -270,12 +303,13 @@ retry_fast_path:
mutex->ctx = ctx;
mutex->holder = self;
self->locks_held++;
+ return COROUTINE_CONTINUE;
} else {
- qemu_co_mutex_lock_slowpath(ctx, mutex);
+ return qemu_co_mutex_lock_slowpath(ctx, mutex);
}
}
-void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
+CoroutineAction qemu_co_mutex_unlock(CoMutex *mutex)
{
Coroutine *self = qemu_coroutine_self();
@@ -290,7 +324,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
self->locks_held--;
if (qatomic_fetch_dec(&mutex->locked) == 1) {
/* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
- return;
+ return COROUTINE_CONTINUE;
}
for (;;) {
@@ -328,8 +362,10 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
}
trace_qemu_co_mutex_unlock_return(mutex, self);
+ return COROUTINE_CONTINUE;
}
+#if 0
struct CoRwTicket {
bool read;
Coroutine *co;
--
2.35.1
- [PATCH 09/35] convert qemu-coroutine-sleep.c to stackless coroutines, (continued)
- [PATCH 09/35] convert qemu-coroutine-sleep.c to stackless coroutines, Paolo Bonzini, 2022/03/10
- [PATCH 14/35] /basic/nesting, Paolo Bonzini, 2022/03/10
- [PATCH 03/35] coroutine: introduce QemuCoLockable, Paolo Bonzini, 2022/03/10
- [PATCH 07/35] coroutine: introduce the "stackless coroutine" backend, Paolo Bonzini, 2022/03/10
- [PATCH 16/35] /basic/entered, Paolo Bonzini, 2022/03/10
- [PATCH 17/35] /basic/in_coroutine, Paolo Bonzini, 2022/03/10
- [PATCH 04/35] coroutine: introduce coroutine_only_fn, Paolo Bonzini, 2022/03/10
- [PATCH 06/35] disable some code, Paolo Bonzini, 2022/03/10
- [PATCH 21/35] /perf/yield, Paolo Bonzini, 2022/03/10
- [PATCH 26/35] convert qemu_co_mutex_lock_slowpath to magic macros, Paolo Bonzini, 2022/03/10
- [PATCH 11/35] convert CoMutex to stackless coroutines, Paolo Bonzini <=
- [PATCH 13/35] /basic/yield, Paolo Bonzini, 2022/03/10
- [PATCH 10/35] enable tail call optimization of qemu_co_mutex_lock, Paolo Bonzini, 2022/03/10
- [PATCH 20/35] /perf/nesting, Paolo Bonzini, 2022/03/10
- [PATCH 18/35] /basic/order, Paolo Bonzini, 2022/03/10
- [PATCH 27/35] /locking/co-mutex/lockable, Paolo Bonzini, 2022/03/10
- [PATCH 23/35] /perf/cost, Paolo Bonzini, 2022/03/10
- [PATCH 12/35] define magic macros for stackless coroutines, Paolo Bonzini, 2022/03/10
- [PATCH 15/35] /basic/self, Paolo Bonzini, 2022/03/10
- [PATCH 19/35] /perf/lifecycle, Paolo Bonzini, 2022/03/10
- [PATCH 22/35] /perf/function-call, Paolo Bonzini, 2022/03/10