[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-trivial] [PATCH v20 03/10] Backup: export interfaces for extra ser
From: |
Changlong Xie |
Subject: |
[Qemu-trivial] [PATCH v20 03/10] Backup: export interfaces for extra serialization |
Date: |
Wed, 8 Jun 2016 09:11:25 +0800 |
Normal backup(sync='none') workflow:
step 1. NBD performs I/O write from client to server
qcow2_co_writev
bdrv_co_writev
...
bdrv_aligned_pwritev
notifier_with_return_list_notify -> backup_do_cow
bdrv_driver_pwritev // write new contents
step 2. drive-backup sync=none
backup_do_cow
{
wait_for_overlapping_requests
cow_request_begin
for(; start < end; start++) {
bdrv_co_readv_no_serialising //read old contents from Secondary disk
bdrv_co_writev // write old contents to hidden-disk
}
cow_request_end
}
step 3. Then roll back to "step 1" to write new contents to Secondary disk.
And for replication, we must make sure that we only read the old contents from
Secondary disk in order to keep contents consistent.
1) Replication workflow of Secondary
virtio-blk
^
-------> 1 NBD |
|| server 3 replication
|| ^ ^
|| | backing backing |
|| Secondary disk 6<-------- hidden-disk 5 <-------- active-disk 4
|| | ^
|| '-------------------------'
|| drive-backup sync=none 2
Hence, we need these interfaces to implement coarse-grained serialization
between
COW of Secondary disk and the read operation of replication.
Example codes about how to use them:
#include "block/block_backup.h"
static coroutine_fn int xxx_co_readv()
{
CowRequest req;
BlockJob *job = secondary_disk->bs->job;
if (job) {
backup_wait_for_overlapping_requests(job, start, end);
backup_cow_request_begin(&req, job, start, end);
ret = bdrv_co_readv();
backup_cow_request_end(&req);
goto out;
}
ret = bdrv_co_readv();
out:
return ret;
}
Signed-off-by: Changlong Xie <address@hidden>
Signed-off-by: Wen Congyang <address@hidden>
---
block/backup.c | 41 ++++++++++++++++++++++++++++++++++-------
include/block/block_backup.h | 14 ++++++++++++++
2 files changed, 48 insertions(+), 7 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index baf3936..b8e1c44 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -28,13 +28,6 @@
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */
-typedef struct CowRequest {
- int64_t start;
- int64_t end;
- QLIST_ENTRY(CowRequest) list;
- CoQueue wait_queue; /* coroutines blocked on this request */
-} CowRequest;
-
typedef struct BackupBlockJob {
BlockJob common;
BlockBackend *target;
@@ -264,6 +257,40 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
bitmap_zero(backup_job->done_bitmap, len);
}
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ wait_for_overlapping_requests(backup_job, start, end);
+}
+
+void backup_cow_request_begin(CowRequest *req, BlockJob *job,
+ int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ cow_request_begin(req, backup_job, start, end);
+}
+
+void backup_cow_request_end(CowRequest *req)
+{
+ cow_request_end(req);
+}
+
static const BlockJobDriver backup_job_driver = {
.instance_size = sizeof(BackupBlockJob),
.job_type = BLOCK_JOB_TYPE_BACKUP,
diff --git a/include/block/block_backup.h b/include/block/block_backup.h
index 3753bcb..e0e7ce6 100644
--- a/include/block/block_backup.h
+++ b/include/block/block_backup.h
@@ -1,3 +1,17 @@
#include "block/block_int.h"
+typedef struct CowRequest {
+ int64_t start;
+ int64_t end;
+ QLIST_ENTRY(CowRequest) list;
+ CoQueue wait_queue; /* coroutines blocked on this request */
+} CowRequest;
+
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
+ int nb_sectors);
+void backup_cow_request_begin(CowRequest *req, BlockJob *job,
+ int64_t sector_num,
+ int nb_sectors);
+void backup_cow_request_end(CowRequest *req);
+
void backup_do_checkpoint(BlockJob *job, Error **errp);
--
1.9.3
- [Qemu-trivial] [PATCH v20 00/10] Block replication for continuous checkpoints, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 01/10] unblock backup operations in backing file, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 02/10] Backup: clear all bitmap when doing block checkpoint, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 04/10] Link backup into block core, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 05/10] docs: block replication's description, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 03/10] Backup: export interfaces for extra serialization,
Changlong Xie <=
- [Qemu-trivial] [PATCH v20 07/10] Introduce new APIs to do replication operation, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 06/10] auto complete active commit, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 10/10] support replication driver in blockdev-add, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 09/10] tests: add unit test case for replication, Changlong Xie, 2016/06/07
- [Qemu-trivial] [PATCH v20 08/10] Implement new driver for block replication, Changlong Xie, 2016/06/07
- Re: [Qemu-trivial] [PATCH v20 00/10] Block replication for continuous checkpoints, Eric Blake, 2016/06/08
- Re: [Qemu-trivial] [PATCH v20 00/10] Block replication for continuous checkpoints, Michael Tokarev, 2016/06/10