qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 14/31] gluster: Switch to .bdrv_co_block_status()


From: Prasanna Kalever
Subject: Re: [Qemu-devel] [PATCH 14/31] gluster: Switch to .bdrv_co_block_status()
Date: Wed, 19 Apr 2017 19:38:15 +0530

oops!

That's 'bytes'.

On Wed, Apr 19, 2017 at 7:36 PM, Prasanna Kalever <address@hidden> wrote:
> On Tue, Apr 18, 2017 at 7:03 AM, Eric Blake <address@hidden> wrote:
>> We are gradually moving away from sector-based interfaces, towards
>> byte-based.  Update the gluster driver accordingly.
>>
>> Signed-off-by: Eric Blake <address@hidden>
>> ---
>>  block/gluster.c | 47 +++++++++++++++++++++++------------------------
>>  1 file changed, 23 insertions(+), 24 deletions(-)
>>
>> diff --git a/block/gluster.c b/block/gluster.c
>> index 1d4e2f7..3f252c6 100644
>> --- a/block/gluster.c
>> +++ b/block/gluster.c
>> @@ -1332,24 +1332,24 @@ exit:
>>  /*
>>   * Returns the allocation status of the specified sectors.
>>   *
>> - * If 'sector_num' is beyond the end of the disk image the return value is 0
>> + * If 'offset' is beyond the end of the disk image the return value is 0
>>   * and 'pnum' is set to 0.
>>   *
>> - * 'pnum' is set to the number of sectors (including and immediately 
>> following
>> - * the specified sector) that are known to be in the same
>> + * 'pnum' is set to the number of bytes (including and immediately following
>> + * the specified offset) that are known to be in the same
>>   * allocated/unallocated state.
>>   *
>> - * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors 
>> goes
>> + * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
>>   * beyond the end of the disk image it will be clamped.
>>   *
>> - * (Based on raw_co_get_block_status() from file-posix.c.)
>> + * (Based on raw_co_block_status() from file-posix.c.)
>>   */
>> -static int64_t coroutine_fn qemu_gluster_co_get_block_status(
>> -        BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
>> +static int64_t coroutine_fn qemu_gluster_co_block_status(
>> +        BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum,
>>          BlockDriverState **file)
>>  {
>>      BDRVGlusterState *s = bs->opaque;
>> -    off_t start, data = 0, hole = 0;
>> +    off_t data = 0, hole = 0;
>>      int64_t total_size;
>>      int ret = -EINVAL;
>>
>> @@ -1357,41 +1357,40 @@ static int64_t coroutine_fn 
>> qemu_gluster_co_get_block_status(
>>          return ret;
>>      }
>>
>> -    start = sector_num * BDRV_SECTOR_SIZE;
>>      total_size = bdrv_getlength(bs);
>>      if (total_size < 0) {
>>          return total_size;
>> -    } else if (start >= total_size) {
>> +    } else if (offset >= total_size) {
>>          *pnum = 0;
>>          return 0;
>> -    } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
>> -        nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
>> +    } else if (offset + bytes > total_size) {
>> +        bytes = total_size - offset;
>>      }
>>
>> -    ret = find_allocation(bs, start, &data, &hole);
>> +    ret = find_allocation(bs, offset, &data, &hole);
>>      if (ret == -ENXIO) {
>>          /* Trailing hole */
>> -        *pnum = nb_sectors;
>> +        *pnum = bytes;
>>          ret = BDRV_BLOCK_ZERO;
>>      } else if (ret < 0) {
>>          /* No info available, so pretend there are no holes */
>> -        *pnum = nb_sectors;
>> +        *pnum = bytes;
>>          ret = BDRV_BLOCK_DATA;
>> -    } else if (data == start) {
>> +    } else if (data == offset) {
>>          /* On a data extent, compute sectors to the end of the extent,
>
> s/sectors/bytes/
>
>>           * possibly including a partial sector at EOF. */
>> -        *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, 
>> BDRV_SECTOR_SIZE));
>> +        *pnum = MIN(bytes, hole - offset);
>>          ret = BDRV_BLOCK_DATA;
>>      } else {
>>          /* On a hole, compute sectors to the beginning of the next extent.  
>> */
>
> s/sectors/bytes/
>
>
> --
> prasanna
>
>> -        assert(hole == start);
>> -        *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
>> +        assert(hole == offset);
>> +        *pnum = MIN(bytes, data - offset);
>>          ret = BDRV_BLOCK_ZERO;
>>      }
>>
>>      *file = bs;
>>
>> -    return ret | BDRV_BLOCK_OFFSET_VALID | start;
>> +    return ret | BDRV_BLOCK_OFFSET_VALID | (offset & 
>> BDRV_BLOCK_OFFSET_MASK);
>>  }
>>
>>
>> @@ -1419,7 +1418,7 @@ static BlockDriver bdrv_gluster = {
>>  #ifdef CONFIG_GLUSTERFS_ZEROFILL
>>      .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
>>  #endif
>> -    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
>> +    .bdrv_co_block_status         = qemu_gluster_co_block_status,
>>      .create_opts                  = &qemu_gluster_create_opts,
>>  };
>>
>> @@ -1447,7 +1446,7 @@ static BlockDriver bdrv_gluster_tcp = {
>>  #ifdef CONFIG_GLUSTERFS_ZEROFILL
>>      .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
>>  #endif
>> -    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
>> +    .bdrv_co_block_status         = qemu_gluster_co_block_status,
>>      .create_opts                  = &qemu_gluster_create_opts,
>>  };
>>
>> @@ -1475,7 +1474,7 @@ static BlockDriver bdrv_gluster_unix = {
>>  #ifdef CONFIG_GLUSTERFS_ZEROFILL
>>      .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
>>  #endif
>> -    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
>> +    .bdrv_co_block_status         = qemu_gluster_co_block_status,
>>      .create_opts                  = &qemu_gluster_create_opts,
>>  };
>>
>> @@ -1509,7 +1508,7 @@ static BlockDriver bdrv_gluster_rdma = {
>>  #ifdef CONFIG_GLUSTERFS_ZEROFILL
>>      .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
>>  #endif
>> -    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
>> +    .bdrv_co_block_status         = qemu_gluster_co_block_status,
>>      .create_opts                  = &qemu_gluster_create_opts,
>>  };
>>
>> --
>> 2.9.3
>>
>>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]