[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 10/10] block-copy: atomic .cancelled and .finished fields in Block
From: |
Vladimir Sementsov-Ogievskiy |
Subject: |
[PULL 10/10] block-copy: atomic .cancelled and .finished fields in BlockCopyCallState |
Date: |
Fri, 25 Jun 2021 16:00:06 +0300 |
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
By adding acquire/release pairs, we ensure that .ret and .error_is_read
fields are written by block_copy_dirty_clusters before .finished is true,
and that they are read by the API user only after .finished is true.
The atomics here are necessary because the fields are concurrently modified
in coroutines, and read outside.
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20210624072043.180494-6-eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
include/block/block-copy.h | 2 ++
block/block-copy.c | 37 ++++++++++++++++++++++---------------
2 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index 338f2ea7fd..5c8278895c 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -18,6 +18,8 @@
#include "block/block.h"
#include "qemu/co-shared-resource.h"
+/* All APIs are thread-safe */
+
typedef void (*BlockCopyAsyncCallbackFunc)(void *opaque);
typedef struct BlockCopyState BlockCopyState;
typedef struct BlockCopyCallState BlockCopyCallState;
diff --git a/block/block-copy.c b/block/block-copy.c
index f3550d0825..0becad52da 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -52,9 +52,9 @@ typedef struct BlockCopyCallState {
Coroutine *co;
/* Fields whose state changes throughout the execution */
- bool finished;
+ bool finished; /* atomic */
QemuCoSleep sleep; /* TODO: protect API with a lock */
- bool cancelled;
+ bool cancelled; /* atomic */
/* To reference all call states from BlockCopyState */
QLIST_ENTRY(BlockCopyCallState) list;
@@ -667,7 +667,8 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
- while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
+ while (bytes && aio_task_pool_status(aio) == 0 &&
+ !qatomic_read(&call_state->cancelled)) {
BlockCopyTask *task;
int64_t status_bytes;
@@ -779,7 +780,7 @@ static int coroutine_fn
block_copy_common(BlockCopyCallState *call_state)
do {
ret = block_copy_dirty_clusters(call_state);
- if (ret == 0 && !call_state->cancelled) {
+ if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
WITH_QEMU_LOCK_GUARD(&s->lock) {
/*
* Check that there is no task we still need to
@@ -815,9 +816,9 @@ static int coroutine_fn
block_copy_common(BlockCopyCallState *call_state)
* 2. We have waited for some intersecting block-copy request
* It may have failed and produced new dirty bits.
*/
- } while (ret > 0 && !call_state->cancelled);
+ } while (ret > 0 && !qatomic_read(&call_state->cancelled));
- call_state->finished = true;
+ qatomic_store_release(&call_state->finished, true);
if (call_state->cb) {
call_state->cb(call_state->cb_opaque);
@@ -880,44 +881,50 @@ void block_copy_call_free(BlockCopyCallState *call_state)
return;
}
- assert(call_state->finished);
+ assert(qatomic_read(&call_state->finished));
g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
- return call_state->finished;
+ return qatomic_read(&call_state->finished);
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret == 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret < 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
- return call_state->cancelled;
+ return qatomic_read(&call_state->cancelled);
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
- assert(call_state->finished);
+ assert(qatomic_load_acquire(&call_state->finished));
if (error_is_read) {
*error_is_read = call_state->error_is_read;
}
return call_state->ret;
}
+/*
+ * Note that cancelling and finishing are racy.
+ * User can cancel a block-copy that is already finished.
+ */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
- call_state->cancelled = true;
+ qatomic_set(&call_state->cancelled, true);
block_copy_kick(call_state);
}
--
2.29.2
- [PULL 00/10] Block Jobs patches, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 01/10] ratelimit: treat zero speed as unlimited, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 07/10] block-copy: streamline choice of copy_range vs. read/write, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 03/10] blockjob: let ratelimit handle a speed of 0, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 08/10] block-copy: move progress_set_remaining in block_copy_task_end, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 05/10] co-shared-resource: protect with a mutex, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 04/10] progressmeter: protect with a mutex, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 06/10] block-copy: small refactor in block_copy_task_entry and block_copy_common, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 10/10] block-copy: atomic .cancelled and .finished fields in BlockCopyCallState,
Vladimir Sementsov-Ogievskiy <=
- [PULL 02/10] block-copy: let ratelimit handle a speed of 0, Vladimir Sementsov-Ogievskiy, 2021/06/25
- [PULL 09/10] block-copy: add CoMutex lock, Vladimir Sementsov-Ogievskiy, 2021/06/25
- Re: [PULL 00/10] Block Jobs patches, Peter Maydell, 2021/06/28
- Re: [PULL 00/10] Block Jobs patches, Peter Maydell, 2021/06/28