Re: [Qemu-devel] [PATCH 3/8] block: add image streaming block job


From: Marcelo Tosatti
Subject: Re: [Qemu-devel] [PATCH 3/8] block: add image streaming block job
Date: Tue, 1 Nov 2011 16:06:12 -0200
User-agent: Mutt/1.5.21 (2010-09-15)

On Thu, Oct 27, 2011 at 04:22:50PM +0100, Stefan Hajnoczi wrote:
> Signed-off-by: Stefan Hajnoczi <address@hidden>
> ---
>  Makefile.objs  |    1 +
>  block/stream.c |  135 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  block_int.h    |    3 +
>  trace-events   |    4 ++
>  4 files changed, 143 insertions(+), 0 deletions(-)
>  create mode 100644 block/stream.c
> 
> diff --git a/Makefile.objs b/Makefile.objs
> index c290fd3..802db96 100644
> --- a/Makefile.objs
> +++ b/Makefile.objs
> @@ -34,6 +34,7 @@ block-nested-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow
>  block-nested-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o
>  block-nested-y += qed-check.o
>  block-nested-y += parallels.o nbd.o blkdebug.o sheepdog.o blkverify.o
> +block-nested-y += stream.o
>  block-nested-$(CONFIG_WIN32) += raw-win32.o
>  block-nested-$(CONFIG_POSIX) += raw-posix.o
>  block-nested-$(CONFIG_CURL) += curl.o
> diff --git a/block/stream.c b/block/stream.c
> new file mode 100644
> index 0000000..8cdf566
> --- /dev/null
> +++ b/block/stream.c
> @@ -0,0 +1,135 @@
> +/*
> + * Image streaming
> + *
> + * Copyright IBM, Corp. 2011
> + *
> + * Authors:
> + *  Stefan Hajnoczi   <address@hidden>
> + *
> + * This work is licensed under the terms of the GNU LGPL, version 2 or later.
> + * See the COPYING.LIB file in the top-level directory.
> + *
> + */
> +
> +#include "trace.h"
> +#include "block_int.h"
> +
> +enum {
> +    /*
> +     * Size of data buffer for populating the image file.  This should be large
> +     * enough to process multiple clusters in a single call, so that populating
> +     * contiguous regions of the image is efficient.
> +     */
> +    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
> +};
> +
> +typedef struct StreamBlockJob {
> +    BlockJob common;
> +    BlockDriverState *base;
> +} StreamBlockJob;
> +
> +static int coroutine_fn stream_populate(BlockDriverState *bs,
> +                                        int64_t sector_num, int nb_sectors,
> +                                        void *buf)
> +{
> +    struct iovec iov = {
> +        .iov_base = buf,
> +        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
> +    };
> +    QEMUIOVector qiov;
> +
> +    qemu_iovec_init_external(&qiov, &iov, 1);
> +
> +    /* Copy-on-read the unallocated clusters */
> +    return bdrv_co_readv(bs, sector_num, nb_sectors, &qiov);
> +}
> +
> +static int stream_one_iteration(StreamBlockJob *s, int64_t sector_num,
> +                                void *buf, int max_sectors, int *n)
> +{
> +    BlockDriverState *bs = s->common.bs;
> +    int ret;
> +
> +    trace_stream_one_iteration(s, sector_num, max_sectors);
> +
> +    ret = bdrv_is_allocated(bs, sector_num, max_sectors, n);
> +    if (ret < 0) {
> +        return ret;
> +    }

bdrv_is_allocated() is still synchronous? If so, there should at least be
a plan to make it asynchronous.
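
For illustration, a rough sketch of what the call site could look like once a
coroutine-friendly query exists; the bdrv_co_is_allocated() driver callback is
an assumption here, not existing API:

/* Sketch only: query allocation status without blocking the main loop.
 * The bdrv_co_is_allocated() coroutine callback is hypothetical and would
 * have to be wired through the block drivers first.
 */
static int coroutine_fn stream_is_allocated(BlockDriverState *bs,
                                            int64_t sector_num,
                                            int nb_sectors, int *pnum)
{
    if (bs->drv && bs->drv->bdrv_co_is_allocated) {
        return bs->drv->bdrv_co_is_allocated(bs, sector_num,
                                             nb_sectors, pnum);
    }
    /* Fall back to the current synchronous helper */
    return bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
}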

> +    if (!ret) {
> +        ret = stream_populate(bs, sector_num, *n, buf);
> +    }
> +    return ret;
> +}
> +
> +static void coroutine_fn stream_run(void *opaque)
> +{
> +    StreamBlockJob *s = opaque;
> +    BlockDriverState *bs = s->common.bs;
> +    int64_t sector_num, end;
> +    int ret = 0;
> +    int n;
> +    void *buf;
> +
> +    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);
> +    s->common.len = bdrv_getlength(bs);
> +    bdrv_get_geometry(bs, (uint64_t *)&end);
> +
> +    bdrv_set_zero_detection(bs, true);
> +    bdrv_set_copy_on_read(bs, true);

This should distinguish between stream-initiated and user-initiated enabling
of zero detection and copy-on-read, so that the unsetting below does not clear
settings the user requested.
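
For example, something along the lines of a per-device enable counter would
do; the copy_on_read counter field is made up for this sketch, and zero
detection would get the same treatment:

/* Sketch only: count enables instead of storing a plain bool, so the
 * streaming job's disable cannot undo an enable requested by the user.
 * The copy_on_read counter in BlockDriverState is assumed here.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

The job would then call the enable/disable pair instead of
bdrv_set_copy_on_read(bs, true/false).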

> +
> +    for (sector_num = 0; sector_num < end; sector_num += n) {
> +        if (block_job_is_cancelled(&s->common)) {
> +            break;
> +        }

If cancellation is seen here in the last loop iteration,
bdrv_change_backing_file below should not be executed.
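
Something as simple as extending the final check would cover it:

/* Sketch: do not drop the backing file if the job was cancelled on its
 * final iteration.
 */
if (sector_num == end && ret == 0 &&
    !block_job_is_cancelled(&s->common)) {
    bdrv_change_backing_file(bs, NULL, NULL);
}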

> +
> +        /* TODO rate-limit */
> +        /* Note that even when no rate limit is applied we need to yield with
> +         * no pending I/O here so that qemu_aio_flush() is able to return.
> +         */
> +        co_sleep_ns(rt_clock, 0);

How do you plan to implement rate limiting?
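
For instance, a crude sectors-per-second limit could reuse the existing sleep
point; the speed field on the job is made up for this sketch:

/* Sketch only: delay proportional to the amount of data just copied.
 * s->speed is a hypothetical sectors-per-second limit (0 = unlimited).
 */
static int64_t stream_delay_ns(StreamBlockJob *s, int nb_sectors)
{
    if (s->speed == 0) {
        return 0;
    }
    return (int64_t)nb_sectors * 1000000000 / s->speed;
}

The loop would then call co_sleep_ns(rt_clock, stream_delay_ns(s, n)) instead
of the unconditional zero-length sleep (with n initialised to 0 so the first
iteration does not sleep on a garbage value).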

> +
> +        ret = stream_one_iteration(s, sector_num, buf,
> +                                   STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
> +        if (ret < 0) {
> +            break;
> +        }
> +
> +        /* Publish progress */
> +        s->common.offset += n * BDRV_SECTOR_SIZE;
> +    }
> +
> +    bdrv_set_copy_on_read(bs, false);
> +    bdrv_set_zero_detection(bs, false);
> +
> +    if (sector_num == end && ret == 0) {
> +        bdrv_change_backing_file(bs, NULL, NULL);
> +    }
> +
> +    qemu_vfree(buf);
> +    block_job_complete(&s->common, ret);
> +}
> +
> +static BlockJobType stream_job_type = {
> +    .instance_size = sizeof(StreamBlockJob),
> +    .job_type      = "stream",
> +};
> +
> +int stream_start(BlockDriverState *bs, BlockDriverState *base,
> +                 BlockDriverCompletionFunc *cb, void *opaque)
> +{
> +    StreamBlockJob *s;
> +    Coroutine *co;
> +
> +    if (bs->job) {
> +        return -EBUSY;
> +    }
> +
> +    s = block_job_create(&stream_job_type, bs, cb, opaque);
> +    s->base = base;
> +
> +    co = qemu_coroutine_create(stream_run);
> +    trace_stream_start(bs, base, s, co, opaque);
> +    qemu_coroutine_enter(co, s);
> +    return 0;
> +}

I'd like to see the shared base code implemented before this is merged.
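
The semantics I have in mind for the shared base: a range only needs to be
copied into the top image if it is unallocated in the top but allocated in one
of the intermediate images between the top and the base. A rough sketch of the
backing-chain walk (helper name made up, pnum handling simplified):

/* Sketch only: report whether the range is allocated anywhere in the
 * chain starting at 'top' down to, but not including, 'base'.  Sectors
 * that are only present in 'base' or below stay shared and are skipped.
 */
static int is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                              int64_t sector_num, int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate = top;
    int n = nb_sectors;

    while (intermediate && intermediate != base) {
        int ret = bdrv_is_allocated(intermediate, sector_num, n, &n);
        if (ret < 0) {
            return ret;
        }
        if (ret) {
            *pnum = n;
            return 1;
        }
        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}

stream_one_iteration() would then call this on bs->backing_hd when a base is
set, and only populate ranges for which it returns 1.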

On a related note, the maze of coroutine locks and waiting queues makes
it difficult to get a clear picture of the execution flow (perhaps that
is due to my lack of familiarity with the use of coroutines in the block
code).



