[PATCH v3 3/6] block/block-copy: refactor copying
From: Vladimir Sementsov-Ogievskiy
Subject: [PATCH v3 3/6] block/block-copy: refactor copying
Date: Tue, 22 Oct 2019 14:18:02 +0300
Merge the copying code into one function, block_copy_do_copy, which only
calls bdrv_* io functions and does no synchronization (like dirty bitmap
set/reset).
Refactor the block_copy() function so that it makes the full decision about
the size of the chunk to be copied and does all the synchronization
(checking intersecting requests, setting/resetting dirty bitmaps); a
simplified sketch of this split follows the list below.
This will help to:
- introduce parallel processing of block_copy iterations: we need to
calculate the chunk size, start async chunk copying and go to the next
iteration
- simplify synchronization improvements (like the memory limit in a
further commit, and shrinking the critical section: we now lock the
whole requested range, when we actually only need to lock the dirty
region being handled at the moment)
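To illustrate the new division of labour, here is a minimal, self-contained
C sketch. This is not QEMU code: do_copy(), copy_loop(), source, target and
the dirty array are hypothetical stand-ins, plain memcpy() replaces the
bdrv_* I/O, and a byte array replaces the dirty bitmap. The point is only
the shape: the inner function does nothing but copy, while the outer loop
decides chunk boundaries and owns all bitmap bookkeeping, including
re-dirtying a chunk whose copy failed.

#include <stdio.h>
#include <string.h>

#define LEN        10  /* total bytes to copy (stands in for s->len) */
#define CHUNK_SIZE 4   /* stands in for the cluster/copy_range chunk limit */

static const char source[LEN + 1] = "ABCDEFGHIJ";
static char target[LEN + 1];
static unsigned char dirty[LEN];   /* stands in for s->copy_bitmap */

/* Like block_copy_do_copy(): pure I/O, no bitmap or request handling. */
static int do_copy(int start, int end)
{
    memcpy(target + start, source + start, end - start);
    return 0;  /* a negative value would signal an I/O error */
}

/* Like the refactored block_copy(): owns chunk sizing and all sync. */
static int copy_loop(int start, int end)
{
    while (start < end) {
        if (!dirty[start]) {           /* already copied: skip */
            start++;
            continue;
        }

        /* Decide the chunk size up front... */
        int chunk_end = start + CHUNK_SIZE;
        if (chunk_end > end) {
            chunk_end = end;
        }
        /* ...and clamp it to the contiguous dirty run. */
        int next_zero = start;
        while (next_zero < chunk_end && dirty[next_zero]) {
            next_zero++;
        }
        chunk_end = next_zero;

        /* Reset the bitmap before copying; restore it on failure. */
        memset(dirty + start, 0, chunk_end - start);
        int ret = do_copy(start, chunk_end);
        if (ret < 0) {
            memset(dirty + start, 1, chunk_end - start);
            return ret;
        }
        start = chunk_end;
    }
    return 0;
}

int main(void)
{
    memset(dirty, 1, LEN);             /* the whole range starts dirty */
    int ret = copy_loop(0, LEN);
    printf("ret=%d target=%s\n", ret, target);  /* ret=0 target=ABCDEFGHIJ */
    return 0;
}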
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
Reviewed-by: Max Reitz <address@hidden>
---
block/block-copy.c | 118 ++++++++++++++++++++-------------------------
block/trace-events | 6 +--
2 files changed, 54 insertions(+), 70 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index cd45b373a9..845b2423dc 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -126,79 +126,64 @@ void block_copy_set_callbacks(
}
/*
- * Copy range to target with a bounce buffer and return the bytes copied. If
- * error occurred, return a negative error number
+ * block_copy_do_copy
+ *
+ * Do copy of cluster-aligned chunk. @end is allowed to exceed s->len only to
+ * cover last cluster when s->len is not aligned to clusters.
+ *
+ * No sync here: neither bitmap nor intersecting request handling; only copy.
+ *
+ * Returns 0 on success.
*/
-static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
- int64_t start,
- int64_t end,
- bool *error_is_read)
+static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
+ int64_t start, int64_t end,
+ bool *error_is_read)
{
int ret;
- int nbytes;
- void *bounce_buffer = qemu_blockalign(s->source->bs, s->cluster_size);
+ int nbytes = MIN(end, s->len) - start;
+ void *bounce_buffer = NULL;
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
- nbytes = MIN(s->cluster_size, s->len - start);
+ assert(QEMU_IS_ALIGNED(end, s->cluster_size));
+ assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+
+ if (s->use_copy_range) {
+ ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
+ 0, s->write_flags);
+ if (ret < 0) {
+ trace_block_copy_copy_range_fail(s, start, ret);
+ s->use_copy_range = false;
+ /* Fallback to read+write with allocated buffer */
+ } else {
+ goto out;
+ }
+ }
+
+ bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
if (ret < 0) {
- trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
+ trace_block_copy_read_fail(s, start, ret);
if (error_is_read) {
*error_is_read = true;
}
- goto fail;
+ goto out;
}
ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
- trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
+ trace_block_copy_write_fail(s, start, ret);
if (error_is_read) {
*error_is_read = false;
}
- goto fail;
+ goto out;
}
+out:
qemu_vfree(bounce_buffer);
- return nbytes;
-fail:
- qemu_vfree(bounce_buffer);
- bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
-
-}
-
-/*
- * Copy range to target and return the bytes copied. If error occurred, return a
- * negative error number.
- */
-static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
- int64_t start,
- int64_t end)
-{
- int ret;
- int nr_clusters;
- int nbytes;
-
- assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
- nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
- s->cluster_size * nr_clusters);
- ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
- 0, s->write_flags);
- if (ret < 0) {
- trace_block_copy_with_offload_fail(s, start, ret);
- bdrv_set_dirty_bitmap(s->copy_bitmap, start,
- s->cluster_size * nr_clusters);
- return ret;
- }
-
- return nbytes;
}
/*
@@ -294,7 +279,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
block_copy_inflight_req_begin(s, &req, start, end);
while (start < end) {
- int64_t dirty_end;
+ int64_t next_zero, chunk_end;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
@@ -302,10 +287,15 @@ int coroutine_fn block_copy(BlockCopyState *s,
continue; /* already copied */
}
- dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
- (end - start));
- if (dirty_end < 0) {
- dirty_end = end;
+ chunk_end = MIN(end, start + (s->use_copy_range ?
+ s->copy_range_size : s->cluster_size));
+
+ next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
+ chunk_end - start);
+ if (next_zero >= 0) {
+ assert(next_zero > start); /* start is dirty */
+ assert(next_zero < chunk_end); /* no need to do MIN() */
+ chunk_end = next_zero;
}
if (s->skip_unallocated) {
@@ -316,27 +306,21 @@ int coroutine_fn block_copy(BlockCopyState *s,
continue;
}
/* Clamp to known allocated region */
- dirty_end = MIN(dirty_end, start + status_bytes);
+ chunk_end = MIN(chunk_end, start + status_bytes);
}
trace_block_copy_process(s, start);
- if (s->use_copy_range) {
- ret = block_copy_with_offload(s, start, dirty_end);
- if (ret < 0) {
- s->use_copy_range = false;
- }
- }
- if (!s->use_copy_range) {
- ret = block_copy_with_bounce_buffer(s, start, dirty_end,
- error_is_read);
- }
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
+
+ ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
if (ret < 0) {
+ bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
break;
}
- start += ret;
- s->progress_bytes_callback(ret, s->progress_opaque);
+ s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
+ start = chunk_end;
ret = 0;
}
diff --git a/block/trace-events b/block/trace-events
index b8d70f5242..ccde15a14c 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -45,9 +45,9 @@ backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p
block_copy_skip(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_skip_range(void *bcs, int64_t start, uint64_t bytes) "bcs %p start %"PRId64" bytes %"PRId64
block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64
-block_copy_with_bounce_buffer_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
-block_copy_with_bounce_buffer_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
-block_copy_with_offload_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
+block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start %"PRId64" ret %d"
# ../blockdev.c
qmp_block_job_cancel(void *job) "job %p"
--
2.21.0