
From: Paolo Bonzini
Subject: Re: [Qemu-devel] [RFC] [PATCHv4 10/13] aio / timers: Convert mainloop to use timeout
Date: Thu, 1 Aug 2013 14:41:52 +0200

 On Jul 26 2013, Alex Bligh wrote:
> Convert mainloop to use timeout from 3 static timers.
> 
> Signed-off-by: Alex Bligh <address@hidden>
> ---
>  main-loop.c |   48 +++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 37 insertions(+), 11 deletions(-)
> 
> diff --git a/main-loop.c b/main-loop.c
> index a44fff6..c30978b 100644
> --- a/main-loop.c
> +++ b/main-loop.c
> @@ -155,10 +155,11 @@ static int max_priority;
>  static int glib_pollfds_idx;
>  static int glib_n_poll_fds;
>  
> -static void glib_pollfds_fill(uint32_t *cur_timeout)
> +static void glib_pollfds_fill(int64_t *cur_timeout)
>  {
>      GMainContext *context = g_main_context_default();
>      int timeout = 0;
> +    int64_t timeout_ns;
>      int n;
>  
>      g_main_context_prepare(context, &max_priority);
> @@ -174,9 +175,13 @@ static void glib_pollfds_fill(uint32_t *cur_timeout)
>                                   glib_n_poll_fds);
>      } while (n != glib_n_poll_fds);
>  
> -    if (timeout >= 0 && timeout < *cur_timeout) {
> -        *cur_timeout = timeout;
> +    if (timeout < 0) {
> +        timeout_ns = -1;
> +    } else {
> +        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
>      }
> +
> +    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
>  }
>  
>  static void glib_pollfds_poll(void)
> @@ -191,7 +196,7 @@ static void glib_pollfds_poll(void)
>  
>  #define MAX_MAIN_LOOP_SPIN (1000)
>  
> -static int os_host_main_loop_wait(uint32_t timeout)
> +static int os_host_main_loop_wait(int64_t timeout)
>  {
>      int ret;
>      static int spin_counter;
> @@ -214,7 +219,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
>              notified = true;
>          }
>  
> -        timeout = 1;
> +        timeout = SCALE_MS;
>      }
>  
>      if (timeout > 0) {
> @@ -224,7 +229,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
>          spin_counter++;
>      }
>  
> -    ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout);
> +    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
>  
>      if (timeout > 0) {
>          qemu_mutex_lock_iothread();
> @@ -373,7 +378,7 @@ static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
>      }
>  }
>  
> -static int os_host_main_loop_wait(uint32_t timeout)
> +static int os_host_main_loop_wait(int64_t timeout)
>  {
>      GMainContext *context = g_main_context_default();
>      GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
> @@ -382,6 +387,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
>      PollingEntry *pe;
>      WaitObjects *w = &wait_objects;
>      gint poll_timeout;
> +    int64_t poll_timeout_ns;
>      static struct timeval tv0;
>      fd_set rfds, wfds, xfds;
>      int nfds;
> @@ -419,12 +425,17 @@ static int os_host_main_loop_wait(uint32_t timeout)
>          poll_fds[n_poll_fds + i].events = G_IO_IN;
>      }
>  
> -    if (poll_timeout < 0 || timeout < poll_timeout) {
> -        poll_timeout = timeout;
> +    if (poll_timeout < 0) {
> +        poll_timeout_ns = -1;
> +    } else {
> +        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
>      }
>  
> +    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
> +
>      qemu_mutex_unlock_iothread();
> -    g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
> +    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);
> +
>      qemu_mutex_lock_iothread();
>      if (g_poll_ret > 0) {
>          for (i = 0; i < w->num; i++) {
> @@ -449,6 +460,7 @@ int main_loop_wait(int nonblocking)
>  {
>      int ret;
>      uint32_t timeout = UINT32_MAX;
> +    int64_t timeout_ns;
>  
>      if (nonblocking) {
>          timeout = 0;
> @@ -462,7 +474,21 @@ int main_loop_wait(int nonblocking)
>      slirp_pollfds_fill(gpollfds);
>  #endif
>      qemu_iohandler_fill(gpollfds);
> -    ret = os_host_main_loop_wait(timeout);
> +
> +    if (timeout == UINT32_MAX) {
> +        timeout_ns = -1;
> +    } else {
> +        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
> +    }
> +
> +    timeout_ns = qemu_soonest_timeout(timeout_ns,
> +                                      qemu_clock_deadline_ns(rt_clock));
> +    timeout_ns = qemu_soonest_timeout(timeout_ns,
> +                                      qemu_clock_deadline_ns(vm_clock));

This vm_clock deadline must not be included if use_icount is enabled.
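
One way to handle that, as a sketch only (reusing the qemu_soonest_timeout()
and qemu_clock_deadline_ns() helpers introduced in this series; the final form
is of course up to you), would be to guard the vm_clock term:

    /* Sketch: with icount the vm_clock deadline is driven elsewhere,
     * so do not let it shorten the poll timeout here. */
    if (!use_icount) {
        timeout_ns = qemu_soonest_timeout(timeout_ns,
                                          qemu_clock_deadline_ns(vm_clock));
    }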

Allowing only a single rt_clock timer list for each AioContext is a
simplification, but I'm worried that it will become a problem later.  For
example, the block layer wants to use vm_clock.  Perhaps QEMUTimerList should
really have three lists, one for each clock type?
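
Something along these lines, purely as an illustration (the struct and field
names below are made up, not taken from the series):

    /* Illustration only: one timer list per clock type, all owned by
     * the same AioContext. */
    typedef struct QEMUTimerListGroup {
        QEMUTimerList *rt;    /* rt_clock timers   */
        QEMUTimerList *vm;    /* vm_clock timers   */
        QEMUTimerList *host;  /* host_clock timers */
    } QEMUTimerListGroup;

Each AioContext would then own one such group, and its deadline computation
would take the minimum across the three lists.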

Once you do this, you get some complications due to the extra data structures,
but other code is simplified noticeably.  For example, you lose the concept
of a default timerlist (it's just the timerlist of the default AioContext).
And because all timerlists have an AioContext, you do not need to special-case
aio_notify() vs. qemu_notify_event().

There are a couple of places to be careful about, of course.  For example,

        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }

in cpus.c must be changed to iterate over all timerlists.
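
Roughly something like this (a sketch only; the per-clock list of timerlists
and the timerlist_deadline_ns() helper are hypothetical names here):

        if (use_icount) {
            QEMUTimerList *tl;
            /* Hypothetical: walk every vm_clock timer list instead of
             * only the default one. */
            QLIST_FOREACH(tl, &vm_clock_timerlists, list) {
                if (timerlist_deadline_ns(tl) <= 0) {
                    qemu_notify_event();
                    break;
                }
            }
        }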

Paolo

> +    timeout_ns = qemu_soonest_timeout(timeout_ns,
> +                                      qemu_clock_deadline_ns(host_clock));
> +
> +    ret = os_host_main_loop_wait(timeout_ns);
>      qemu_iohandler_poll(gpollfds, ret);
>  #ifdef CONFIG_SLIRP
>      slirp_pollfds_poll(gpollfds, (ret < 0));
> -- 
> 1.7.9.5
> 


