[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 14/19] block/block-copy: use block_status
From: Max Reitz
Subject: [PULL 14/19] block/block-copy: use block_status
Date: Wed, 11 Mar 2020 14:52:08 +0100
From: Vladimir Sementsov-Ogievskiy <address@hidden>
Use bdrv_block_status_above to choose an effective chunk size and to
handle zeroes effectively.
This substitutes checking for just being allocated or not, and drops the
old code path for it. Assistance by the backup job is dropped too, as
caching block-status information is more difficult than just caching
is-allocated information in our dirty bitmap, and the backup job is not a
good place for this caching anyway.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
Reviewed-by: Andrey Shinkevich <address@hidden>
Reviewed-by: Max Reitz <address@hidden>
Message-Id: <address@hidden>
Signed-off-by: Max Reitz <address@hidden>
---
block/block-copy.c | 73 +++++++++++++++++++++++++++++++++++++---------
block/trace-events | 1 +
2 files changed, 61 insertions(+), 13 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index ddd61c1652..b075dba206 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -155,7 +155,7 @@ void block_copy_set_progress_meter(BlockCopyState *s,
ProgressMeter *pm)
*/
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
int64_t start, int64_t end,
- bool *error_is_read)
+ bool zeroes, bool *error_is_read)
{
int ret;
int nbytes = MIN(end, s->len) - start;
@@ -165,6 +165,18 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState
*s,
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+ if (zeroes) {
+ ret = bdrv_co_pwrite_zeroes(s->target, start, nbytes, s->write_flags &
+ ~BDRV_REQ_WRITE_COMPRESSED);
+ if (ret < 0) {
+ trace_block_copy_write_zeroes_fail(s, start, ret);
+ if (error_is_read) {
+ *error_is_read = false;
+ }
+ }
+ return ret;
+ }
+
if (s->use_copy_range) {
ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
0, s->write_flags);
@@ -230,6 +242,38 @@ out:
return ret;
}
+static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+ int64_t bytes, int64_t *pnum)
+{
+ int64_t num;
+ BlockDriverState *base;
+ int ret;
+
+ if (s->skip_unallocated && s->source->bs->backing) {
+ base = s->source->bs->backing->bs;
+ } else {
+ base = NULL;
+ }
+
+ ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
+ NULL, NULL);
+ if (ret < 0 || num < s->cluster_size) {
+ /*
+ * On error or if failed to obtain large enough chunk just fallback to
+ * copy one cluster.
+ */
+ num = s->cluster_size;
+ ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
+ } else if (offset + num == s->len) {
+ num = QEMU_ALIGN_UP(num, s->cluster_size);
+ } else {
+ num = QEMU_ALIGN_DOWN(num, s->cluster_size);
+ }
+
+ *pnum = num;
+ return ret;
+}
+
/*
* Check if the cluster starting at offset is allocated or not.
* return via pnum the number of contiguous clusters sharing this allocation.
@@ -308,7 +352,6 @@ int coroutine_fn block_copy(BlockCopyState *s,
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
- int64_t status_bytes;
BlockCopyInFlightReq req;
/*
@@ -325,7 +368,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
block_copy_inflight_req_begin(s, &req, start, end);
while (start < end) {
- int64_t next_zero, chunk_end;
+ int64_t next_zero, chunk_end, status_bytes;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
@@ -343,24 +386,28 @@ int coroutine_fn block_copy(BlockCopyState *s,
chunk_end = next_zero;
}
- if (s->skip_unallocated) {
- ret = block_copy_reset_unallocated(s, start, &status_bytes);
- if (ret == 0) {
- trace_block_copy_skip_range(s, start, status_bytes);
- start += status_bytes;
- continue;
- }
- /* Clamp to known allocated region */
- chunk_end = MIN(chunk_end, start + status_bytes);
+ ret = block_copy_block_status(s, start, chunk_end - start,
+ &status_bytes);
+ if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, status_bytes);
+ progress_set_remaining(s->progress,
+ bdrv_get_dirty_count(s->copy_bitmap) +
+ s->in_flight_bytes);
+ trace_block_copy_skip_range(s, start, status_bytes);
+ start += status_bytes;
+ continue;
}
+ chunk_end = MIN(chunk_end, start + status_bytes);
+
trace_block_copy_process(s, start);
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
s->in_flight_bytes += chunk_end - start;
co_get_from_shres(s->mem, chunk_end - start);
- ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
+ ret = block_copy_do_copy(s, start, chunk_end, ret & BDRV_BLOCK_ZERO,
+ error_is_read);
co_put_to_shres(s->mem, chunk_end - start);
s->in_flight_bytes -= chunk_end - start;
if (ret < 0) {
diff --git a/block/trace-events b/block/trace-events
index 1a7329b736..29dff8881c 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -48,6 +48,7 @@ block_copy_process(void *bcs, int64_t start) "bcs %p start
%"PRId64
block_copy_copy_range_fail(void *bcs, int64_t start, int ret) "bcs %p start
%"PRId64" ret %d"
block_copy_read_fail(void *bcs, int64_t start, int ret) "bcs %p start
%"PRId64" ret %d"
block_copy_write_fail(void *bcs, int64_t start, int ret) "bcs %p start
%"PRId64" ret %d"
+block_copy_write_zeroes_fail(void *bcs, int64_t start, int ret) "bcs %p start
%"PRId64" ret %d"
# ../blockdev.c
qmp_block_job_cancel(void *job) "job %p"
--
2.24.1
- [PULL 04/19] iotests: add 288 luks qemu-img measure test, (continued)
- [PULL 04/19] iotests: add 288 luks qemu-img measure test, Max Reitz, 2020/03/11
- [PULL 06/19] block/curl: HTTP header field names are case insensitive, Max Reitz, 2020/03/11
- [PULL 09/19] qemu-img: free memory before re-assign, Max Reitz, 2020/03/11
- [PULL 07/19] iotests: Fix nonportable use of od --endian, Max Reitz, 2020/03/11
- [PULL 08/19] block/qcow2: do free crypto_opts in qcow2_close(), Max Reitz, 2020/03/11
- [PULL 03/19] qemu-img: allow qemu-img measure --object without a filename, Max Reitz, 2020/03/11
- [PULL 10/19] block/qcow2-threads: fix qcow2_decompress, Max Reitz, 2020/03/11
- [PULL 12/19] block/block-copy: fix progress calculation, Max Reitz, 2020/03/11
- [PULL 11/19] job: refactor progress to separate object, Max Reitz, 2020/03/11
- [PULL 13/19] block/block-copy: specialcase first copy_range request, Max Reitz, 2020/03/11
- [PULL 14/19] block/block-copy: use block_status,
Max Reitz <=
- [PULL 15/19] block/block-copy: factor out find_conflicting_inflight_req, Max Reitz, 2020/03/11
- [PULL 16/19] block/block-copy: refactor interfaces to use bytes instead of end, Max Reitz, 2020/03/11
- [PULL 18/19] block/block-copy: reduce intersecting request lock, Max Reitz, 2020/03/11
- [PULL 17/19] block/block-copy: rename start to offset in interfaces, Max Reitz, 2020/03/11
- [PULL 19/19] block/block-copy: hide structure definitions, Max Reitz, 2020/03/11
- Re: [PULL 00/19] Block patches, Peter Maydell, 2020/03/12