From: Fam Zheng
Subject: Re: [Qemu-devel] [PATCH 07/18] block: introduce bdrv_poll_while and bdrv_wakeup
Date: Fri, 14 Oct 2016 18:42:12 +0800
User-agent: Mutt/1.7.0 (2016-08-17)

On Thu, 10/13 19:34, Paolo Bonzini wrote:
> We want the BDS event loop to run exclusively in the iothread that
> owns the BDS's AioContext.  This function and macro provide the
> synchronization between the two event loops.
> 
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
>  block/block-backend.c     |  7 +------
>  block/io.c                | 47 +++++++++++------------------------------------
>  block/qed-table.c         | 16 ++++------------
>  block/qed.c               |  4 +++-
>  include/block/block.h     |  9 +++++++++
>  include/block/block_int.h |  1 +
>  6 files changed, 29 insertions(+), 55 deletions(-)
> 
> diff --git a/block/block-backend.c b/block/block-backend.c
> index 234df1e..c5c2597 100644
> --- a/block/block-backend.c
> +++ b/block/block-backend.c
> @@ -878,7 +878,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
>                     int64_t bytes, CoroutineEntry co_entry,
>                     BdrvRequestFlags flags)
>  {
> -    AioContext *aio_context;
>      QEMUIOVector qiov;
>      struct iovec iov;
>      Coroutine *co;
> @@ -900,11 +899,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
>  
>      co = qemu_coroutine_create(co_entry, &rwco);
>      qemu_coroutine_enter(co);
> -
> -    aio_context = blk_get_aio_context(blk);
> -    while (rwco.ret == NOT_DONE) {
> -        aio_poll(aio_context, true);
> -    }
> +    bdrv_poll_while(blk_bs(blk), rwco.ret == NOT_DONE);

Can we make it "BDRV_POLL_WHILE"?  With the lower-case spelling it reads
like a function call, so a reader's mental model would be that
"rwco.ret == NOT_DONE" is evaluated only once, before the poll, rather than
re-evaluated on each iteration.
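
(Purely illustrative, keeping the body from the include/block/block.h hunk
below unchanged and only renaming it:

    #define BDRV_POLL_WHILE(bs, cond) ({                       \
        bool waited_ = false;                                  \
        BlockDriverState *bs_ = (bs);                          \
        while ((cond)) {                                       \
            aio_poll(bdrv_get_aio_context(bs_), true);         \
            waited_ = true;                                    \
        }                                                      \
        waited_; })

a caller such as

    BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);

then reads unambiguously as a macro whose condition is re-evaluated on every
poll iteration.)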

>  
>      return rwco.ret;
>  }
> diff --git a/block/io.c b/block/io.c
> index afec968..7d3dcfc 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -156,23 +156,12 @@ bool bdrv_requests_pending(BlockDriverState *bs)
>      return false;
>  }
>  
> -static bool bdrv_drain_poll(BlockDriverState *bs)
> -{
> -    bool waited = false;
> -
> -    while (atomic_read(&bs->in_flight) > 0) {
> -        aio_poll(bdrv_get_aio_context(bs), true);
> -        waited = true;
> -    }
> -    return waited;
> -}
> -
>  static bool bdrv_drain_recurse(BlockDriverState *bs)
>  {
>      BdrvChild *child;
>      bool waited;
>  
> -    waited = bdrv_drain_poll(bs);
> +    waited = bdrv_poll_while(bs, atomic_read(&bs->in_flight) > 0);
>  
>      if (bs->drv && bs->drv->bdrv_drain) {
>          bs->drv->bdrv_drain(bs);
> @@ -485,9 +474,14 @@ void bdrv_inc_in_flight(BlockDriverState *bs)
>      atomic_inc(&bs->in_flight);
>  }
>  
> +void bdrv_wakeup(BlockDriverState *bs)
> +{
> +}
> +
>  void bdrv_dec_in_flight(BlockDriverState *bs)
>  {
>      atomic_dec(&bs->in_flight);
> +    bdrv_wakeup(bs);
>  }
>  
>  static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
> @@ -597,13 +591,9 @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
>          /* Fast-path if already in coroutine context */
>          bdrv_rw_co_entry(&rwco);
>      } else {
> -        AioContext *aio_context = bdrv_get_aio_context(child->bs);
> -
>          co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
>          qemu_coroutine_enter(co);
> -        while (rwco.ret == NOT_DONE) {
> -            aio_poll(aio_context, true);
> -        }
> +        bdrv_poll_while(child->bs, rwco.ret == NOT_DONE);
>      }
>      return rwco.ret;
>  }
> @@ -1845,14 +1835,10 @@ int64_t bdrv_get_block_status_above(BlockDriverState *bs,
>          /* Fast-path if already in coroutine context */
>          bdrv_get_block_status_above_co_entry(&data);
>      } else {
> -        AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>          co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
>                                     &data);
>          qemu_coroutine_enter(co);
> -        while (!data.done) {
> -            aio_poll(aio_context, true);
> -        }
> +        bdrv_poll_while(bs, !data.done);
>      }
>      return data.ret;
>  }
> @@ -2411,13 +2397,9 @@ int bdrv_flush(BlockDriverState *bs)
>          /* Fast-path if already in coroutine context */
>          bdrv_flush_co_entry(&flush_co);
>      } else {
> -        AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>          co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
>          qemu_coroutine_enter(co);
> -        while (flush_co.ret == NOT_DONE) {
> -            aio_poll(aio_context, true);
> -        }
> +        bdrv_poll_while(bs, flush_co.ret == NOT_DONE);
>      }
>  
>      return flush_co.ret;
> @@ -2543,13 +2525,9 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
>          /* Fast-path if already in coroutine context */
>          bdrv_pdiscard_co_entry(&rwco);
>      } else {
> -        AioContext *aio_context = bdrv_get_aio_context(bs);
> -
>          co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
>          qemu_coroutine_enter(co);
> -        while (rwco.ret == NOT_DONE) {
> -            aio_poll(aio_context, true);
> -        }
> +        bdrv_poll_while(bs, rwco.ret == NOT_DONE);
>      }
>  
>      return rwco.ret;
> @@ -2608,11 +2586,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
>          bdrv_co_ioctl_entry(&data);
>      } else {
>          Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry, &data);
> -
>          qemu_coroutine_enter(co);
> -        while (data.ret == -EINPROGRESS) {
> -            aio_poll(bdrv_get_aio_context(bs), true);
> -        }
> +        bdrv_poll_while(bs, data.ret == -EINPROGRESS);
>      }
>      return data.ret;
>  }
> diff --git a/block/qed-table.c b/block/qed-table.c
> index 1a731df..b7d53ce 100644
> --- a/block/qed-table.c
> +++ b/block/qed-table.c
> @@ -174,9 +174,7 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
>  
>      qed_read_table(s, s->header.l1_table_offset,
>                     s->l1_table, qed_sync_cb, &ret);
> -    while (ret == -EINPROGRESS) {
> -        aio_poll(bdrv_get_aio_context(s->bs), true);
> -    }
> +    bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>  
>      return ret;
>  }
> @@ -195,9 +193,7 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
>      int ret = -EINPROGRESS;
>  
>      qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
> -    while (ret == -EINPROGRESS) {
> -        aio_poll(bdrv_get_aio_context(s->bs), true);
> -    }
> +    bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>  
>      return ret;
>  }
> @@ -268,9 +264,7 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
>      int ret = -EINPROGRESS;
>  
>      qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
> -    while (ret == -EINPROGRESS) {
> -        aio_poll(bdrv_get_aio_context(s->bs), true);
> -    }
> +    bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>  
>      return ret;
>  }
> @@ -290,9 +284,7 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
>      int ret = -EINPROGRESS;
>  
>      qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
> -    while (ret == -EINPROGRESS) {
> -        aio_poll(bdrv_get_aio_context(s->bs), true);
> -    }
> +    bdrv_poll_while(s->bs, ret == -EINPROGRESS);
>  
>      return ret;
>  }
> diff --git a/block/qed.c b/block/qed.c
> index 1a7ef0a..dcb5fb9 100644
> --- a/block/qed.c
> +++ b/block/qed.c
> @@ -354,7 +354,9 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
>  static void qed_cancel_need_check_timer(BDRVQEDState *s)
>  {
>      trace_qed_cancel_need_check_timer(s);
> -    timer_del(s->need_check_timer);
> +    if (s->need_check_timer) {
> +        timer_del(s->need_check_timer);
> +    }
>  }

This belongs in a separate patch, or deserves an explanation in the commit
message.

>  
>  static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
> diff --git a/include/block/block.h b/include/block/block.h
> index 107c603..876a1e7 100644
> --- a/include/block/block.h
> +++ b/include/block/block.h
> @@ -340,6 +340,15 @@ void bdrv_drain(BlockDriverState *bs);
>  void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
>  void bdrv_drain_all(void);
>  
> +#define bdrv_poll_while(bs, cond) ({                       \
> +    bool waited_ = false;                                  \
> +    BlockDriverState *bs_ = (bs);                          \
> +    while ((cond)) {                                       \
> +        aio_poll(bdrv_get_aio_context(bs_), true);         \
> +        waited_ = true;                                    \
> +    }                                                      \
> +    waited_; })
> +
>  int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count);
>  int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
>  int bdrv_has_zero_init_1(BlockDriverState *bs);
> diff --git a/include/block/block_int.h b/include/block/block_int.h
> index 5a7308b..11f877b 100644
> --- a/include/block/block_int.h
> +++ b/include/block/block_int.h
> @@ -786,6 +786,7 @@ void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);
>  
>  void bdrv_inc_in_flight(BlockDriverState *bs);
>  void bdrv_dec_in_flight(BlockDriverState *bs);
> +void bdrv_wakeup(BlockDriverState *bs);
>  
>  void blockdev_close_all_bdrv_states(void);
>  
> -- 
> 2.7.4
> 
> 


