[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-block] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests
From: Paolo Bonzini
Subject: Re: [Qemu-block] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests
Date: Mon, 18 Jul 2016 10:16:11 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.1.1
On 15/07/2016 20:32, Eric Blake wrote:
> Now that the block layer will honor max_transfer, we can simplify
> our code to rely on that guarantee.
>
> The readv code can call directly into nbd-client, just as the
> writev code has done since commit 52a4650.
>
> Interestingly enough, while qemu-io 'w 0 40m' splits into a 32M
> and 8M transaction, 'w -z 0 40m' splits into two 16M and an 8M,
> because the block layer caps the bounce buffer for writing zeroes
> at 16M. When we later introduce support for NBD_CMD_WRITE_ZEROES,
> we can get a full 32M zero write (or larger, if the client and
> server negotiate that write zeroes can use a larger size than
> ordinary writes).
>
> Signed-off-by: Eric Blake <address@hidden>
> Reviewed-by: Fam Zheng <address@hidden>
> Reviewed-by: Stefan Hajnoczi <address@hidden>
> ---
> block/nbd-client.c | 51 ++++++++-------------------------------------------
> block/nbd.c | 12 +++---------
> 2 files changed, 11 insertions(+), 52 deletions(-)
>
> diff --git a/block/nbd-client.c b/block/nbd-client.c
> index 4cc408d..f1fb58b 100644
> --- a/block/nbd-client.c
> +++ b/block/nbd-client.c
> @@ -217,15 +217,15 @@ static void nbd_coroutine_end(NbdClientSession *s,
> }
> }
>
> -static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov,
> - int offset)
> +int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
> + int nb_sectors, QEMUIOVector *qiov)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> struct nbd_request request = { .type = NBD_CMD_READ };
> struct nbd_reply reply;
> ssize_t ret;
>
> + assert(nb_sectors <= NBD_MAX_SECTORS);
> request.from = sector_num * 512;
> request.len = nb_sectors * 512;
>
> @@ -234,16 +234,15 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t
> sector_num,
> if (ret < 0) {
> reply.error = -ret;
> } else {
> - nbd_co_receive_reply(client, &request, &reply, qiov, offset);
> + nbd_co_receive_reply(client, &request, &reply, qiov, 0);
> }
> nbd_coroutine_end(client, &request);
> return -reply.error;
>
> }
>
> -static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov,
> - int offset, int flags)
> +int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
> + int nb_sectors, QEMUIOVector *qiov, int flags)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> struct nbd_request request = { .type = NBD_CMD_WRITE };
> @@ -255,11 +254,12 @@ static int nbd_co_writev_1(BlockDriverState *bs,
> int64_t sector_num,
> request.type |= NBD_CMD_FLAG_FUA;
> }
>
> + assert(nb_sectors <= NBD_MAX_SECTORS);
> request.from = sector_num * 512;
> request.len = nb_sectors * 512;
>
> nbd_coroutine_start(client, &request);
> - ret = nbd_co_send_request(bs, &request, qiov, offset);
> + ret = nbd_co_send_request(bs, &request, qiov, 0);
> if (ret < 0) {
> reply.error = -ret;
> } else {
> @@ -269,41 +269,6 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t
> sector_num,
> return -reply.error;
> }
>
> -int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov)
> -{
> - int offset = 0;
> - int ret;
> - while (nb_sectors > NBD_MAX_SECTORS) {
> - ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
> - if (ret < 0) {
> - return ret;
> - }
> - offset += NBD_MAX_SECTORS * 512;
> - sector_num += NBD_MAX_SECTORS;
> - nb_sectors -= NBD_MAX_SECTORS;
> - }
> - return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
> -}
> -
> -int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov, int flags)
> -{
> - int offset = 0;
> - int ret;
> - while (nb_sectors > NBD_MAX_SECTORS) {
> - ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset,
> - flags);
> - if (ret < 0) {
> - return ret;
> - }
> - offset += NBD_MAX_SECTORS * 512;
> - sector_num += NBD_MAX_SECTORS;
> - nb_sectors -= NBD_MAX_SECTORS;
> - }
> - return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset, flags);
> -}
> -
> int nbd_client_co_flush(BlockDriverState *bs)
> {
> NbdClientSession *client = nbd_get_client_session(bs);
> diff --git a/block/nbd.c b/block/nbd.c
> index 08e5b67..8a13078 100644
> --- a/block/nbd.c
> +++ b/block/nbd.c
> @@ -349,12 +349,6 @@ static int nbd_open(BlockDriverState *bs, QDict
> *options, int flags,
> return ret;
> }
>
> -static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
> - int nb_sectors, QEMUIOVector *qiov)
> -{
> - return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
> -}
> -
> static int nbd_co_flush(BlockDriverState *bs)
> {
> return nbd_client_co_flush(bs);
> @@ -450,7 +444,7 @@ static BlockDriver bdrv_nbd = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> @@ -468,7 +462,7 @@ static BlockDriver bdrv_nbd_tcp = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
> @@ -486,7 +480,7 @@ static BlockDriver bdrv_nbd_unix = {
> .instance_size = sizeof(BDRVNBDState),
> .bdrv_parse_filename = nbd_parse_filename,
> .bdrv_file_open = nbd_open,
> - .bdrv_co_readv = nbd_co_readv,
> + .bdrv_co_readv = nbd_client_co_readv,
> .bdrv_co_writev_flags = nbd_client_co_writev,
> .bdrv_close = nbd_close,
> .bdrv_co_flush_to_os = nbd_co_flush,
>
Acked-by: Paolo Bonzini <address@hidden>
- [Qemu-block] [PATCH v3 0/6] Auto-fragment large transactions at the block layer, Eric Blake, 2016/07/15
- [Qemu-block] [PATCH v3 5/6] nbd: Drop unused offset parameter, Eric Blake, 2016/07/15
- [Qemu-block] [PATCH v3 6/6] iscsi: Rely on block layer to break up large requests, Eric Blake, 2016/07/15
- [Qemu-block] [PATCH v3 1/6] block: Fragment reads to max transfer length, Eric Blake, 2016/07/15
- [Qemu-block] [PATCH v3 2/6] raw_bsd: Don't advertise flags not supported by protocol layer, Eric Blake, 2016/07/15
- [Qemu-block] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests, Eric Blake, 2016/07/15
- Re: [Qemu-block] [PATCH v3 4/6] nbd: Rely on block layer to break up large requests, Paolo Bonzini <=
- [Qemu-block] [PATCH v3 3/6] block: Fragment writes to max transfer length, Eric Blake, 2016/07/15
- Re: [Qemu-block] [Qemu-devel] [PATCH v3 0/6] Auto-fragment large transactions at the block layer, Stefan Hajnoczi, 2016/07/19