From: Mathieu Poirier
Subject: Re: [PATCH v3 3/4] vhost-user-rng: backend: Add RNG vhost-user daemon implementation
Date: Thu, 22 Jul 2021 11:54:24 -0600

On Wed, Jul 21, 2021 at 09:14:31PM +0100, Alex Bennée wrote:
> 
> Mathieu Poirier <mathieu.poirier@linaro.org> writes:
> 
> > This patch provides the vhost-user backend implementation to work
> > in tandem with the vhost-user-rng implementation of the QEMU VMM.
> >
> > It uses the vhost-user API so that other VMM can re-use the interface
> > without having to write the driver again.
> >
> > Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
> 
> Try the following patch which creates a nested main loop and runs it
> until the g_timeout_add fires again.
> 
> --8<---------------cut here---------------start------------->8---
> tools/virtio/vhost-user-rng: avoid mutex by using nested main loop
> 
> As we are blocking anyway all we really need to do is run a main loop
> until the timer fires and the data is consumed.
> 

Right, I made the implementation blocking to be as close as possible to what
virtio-rng does.

I took a look at your patch below and it should do the trick.  Testing yielded
the same results as my solution, so this is good.  To me a nested loop is a
little unorthodox for solving this kind of problem, but it takes fewer lines of
code and avoids spinning up a new thread to deal with the timer.
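
For anyone following the thread, the pattern boils down to something like
this minimal, untested sketch (standalone names, not taken from the patch):
a periodic timeout source plus a worker that blocks by running a second
GMainLoop on the same context until the timer quits it.

    #include <glib.h>

    typedef struct {
        GMainLoop *loop;     /* outer loop */
        GMainLoop *blocked;  /* nested loop, non-NULL while waiting */
    } Ctx;

    static gboolean timer_cb(gpointer user_data)
    {
        Ctx *ctx = user_data;

        if (ctx->blocked) {
            g_main_loop_quit(ctx->blocked);   /* wake the blocked caller */
        }
        return G_SOURCE_CONTINUE;             /* keep the timer armed */
    }

    static gboolean work_cb(gpointer user_data)
    {
        Ctx *ctx = user_data;

        /* Block here; other sources on the context keep being dispatched. */
        ctx->blocked = g_main_loop_new(g_main_loop_get_context(ctx->loop),
                                       FALSE);
        g_main_loop_run(ctx->blocked);
        g_main_loop_unref(ctx->blocked);
        ctx->blocked = NULL;

        g_print("unblocked by timer\n");
        g_main_loop_quit(ctx->loop);
        return G_SOURCE_REMOVE;
    }

    int main(void)
    {
        Ctx ctx = { g_main_loop_new(NULL, FALSE), NULL };

        g_timeout_add(100, timer_cb, &ctx);   /* fires every 100 ms */
        g_idle_add(work_cb, &ctx);

        g_main_loop_run(ctx.loop);
        g_main_loop_unref(ctx.loop);
        return 0;
    }

The nested g_main_loop_run() only returns once timer_cb() calls
g_main_loop_quit() on it, which is exactly how the patch parks
vu_rng_handle_requests() until the quota is replenished.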

I'll send another revision.

Thanks for the review,
Mathieu
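
P.S. One GLib detail worth noting in the diff below: check_rate_limit()
returns true (G_SOURCE_CONTINUE), so the source installed by g_timeout_add()
stays armed and fires every period_ms; returning false would remove it after
the first expiry.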

> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> 
> tools/vhost-user-rng/main.c | 106 +++++++++++++-------------------------------
> 1 file changed, 30 insertions(+), 76 deletions(-)
> 
> modified   tools/vhost-user-rng/main.c
> @@ -42,13 +42,10 @@
>  
>  typedef struct {
>      VugDev dev;
> -    struct itimerspec ts;
> -    timer_t rate_limit_timer;
> -    pthread_mutex_t rng_mutex;
> -    pthread_cond_t rng_cond;
>      int64_t quota_remaining;
> -    bool activate_timer;
> +    guint timer;
>      GMainLoop *loop;
> +    GMainLoop *blocked;
>  } VuRNG;
>  
>  static gboolean print_cap, verbose;
> @@ -59,66 +56,26 @@ static gint source_fd, socket_fd = -1;
>  static uint32_t period_ms = 1 << 16;
>  static uint64_t max_bytes = INT64_MAX;
>  
> -static void check_rate_limit(union sigval sv)
> +static gboolean check_rate_limit(gpointer user_data)
>  {
> -    VuRNG *rng = sv.sival_ptr;
> -    bool wakeup = false;
> +    VuRNG *rng = (VuRNG *) user_data;
>  
> -    pthread_mutex_lock(&rng->rng_mutex);
> -    /*
> -     * The timer has expired and the guest has used all available
> -     * entropy, which means function vu_rng_handle_request() is waiting
> -     * on us.  As such wake it up once we're done here.
> -     */
> -    if (rng->quota_remaining == 0) {
> -        wakeup = true;
> +    if (rng->blocked) {
> +        g_info("%s: called while blocked", __func__);
> +        g_main_loop_quit(rng->blocked);
>      }
> -
>      /*
>       * Reset the entropy available to the guest and tell function
>       * vu_rng_handle_requests() to start the timer before using it.
>       */
>      rng->quota_remaining = max_bytes;
> -    rng->activate_timer = true;
> -    pthread_mutex_unlock(&rng->rng_mutex);
> -
> -    if (wakeup) {
> -        pthread_cond_signal(&rng->rng_cond);
> -    }
> -}
> -
> -static void setup_timer(VuRNG *rng)
> -{
> -    struct sigevent sev;
> -    int ret;
> -
> -    memset(&rng->ts, 0, sizeof(struct itimerspec));
> -    rng->ts.it_value.tv_sec = period_ms / 1000;
> -    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
> -
> -    /*
> -     * Call function check_rate_limit() as if it was the start of
> -     * a new thread when the timer expires.
> -     */
> -    sev.sigev_notify = SIGEV_THREAD;
> -    sev.sigev_notify_function = check_rate_limit;
> -    sev.sigev_value.sival_ptr = rng;
> -    /* Needs to be NULL if defaults attributes are to be used. */
> -    sev.sigev_notify_attributes = NULL;
> -    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
> -    if (ret < 0) {
> -        fprintf(stderr, "timer_create() failed\n");
> -    }
> -
> +    return true;
>  }
>  
> -
>  /* Virtio helpers */
>  static uint64_t rng_get_features(VuDev *dev)
>  {
> -    if (verbose) {
> -        g_info("%s: replying", __func__);
> -    }
> +    g_info("%s: replying", __func__);
>      return 0;
>  }
>  
> @@ -137,7 +94,7 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>      VuVirtq *vq = vu_get_queue(dev, qidx);
>      VuVirtqElement *elem;
>      size_t to_read;
> -    int len, ret;
> +    int len;
>  
>      for (;;) {
>          /* Get element in the vhost virtqueue */
> @@ -149,24 +106,21 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>          /* Get the amount of entropy to read from the vhost server */
>          to_read = elem->in_sg[0].iov_len;
>  
> -        pthread_mutex_lock(&rng->rng_mutex);
> -
>          /*
>           * We have consumed all entropy available for this time slice.
>           * Wait for the timer (check_rate_limit()) to tell us about the
>           * start of a new time slice.
>           */
>          if (rng->quota_remaining == 0) {
> -            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
> -        }
> -
> -        /* Start the timer if the last time slice has expired */
> -        if (rng->activate_timer == true) {
> -            rng->activate_timer = false;
> -            ret = timer_settime(rng->rate_limit_timer, 0, &rng->ts, NULL);
> -            if (ret < 0) {
> -                fprintf(stderr, "timer_settime() failed\n");
> -            }
> +            g_assert(!rng->blocked);
> +            rng->blocked = g_main_loop_new(g_main_loop_get_context(rng->loop), false);
> +            g_info("attempting to consume %ld bytes but no quota left (%s)",
> +                   to_read,
> +                   g_main_loop_is_running(rng->loop) ? "running" : "not running");
> +            g_main_loop_run(rng->blocked);
> +            g_info("return from blocked loop: %ld", rng->quota_remaining);
> +            g_main_loop_unref(rng->blocked);
> +            rng->blocked = NULL;
>          }
>  
>          /* Make sure we don't read more than it's available */
> @@ -183,8 +137,6 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>  
>          rng->quota_remaining -= len;
>  
> -        pthread_mutex_unlock(&rng->rng_mutex);
> -
>          vu_queue_push(dev, vq, elem, len);
>          free(elem);
>      }
> @@ -373,6 +325,7 @@ int main(int argc, char *argv[])
>       * can add it's GSource watches.
>       */
>      rng.loop = g_main_loop_new(NULL, FALSE);
> +    rng.blocked = NULL;
>  
>      if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
>                    panic, &vuiface)) {
> @@ -380,24 +333,25 @@ int main(int argc, char *argv[])
>          exit(EXIT_FAILURE);
>      }
>  
> -    rng.quota_remaining = max_bytes;
> -    rng.activate_timer = true;
> -    pthread_mutex_init(&rng.rng_mutex, NULL);
> -    pthread_cond_init(&rng.rng_cond, NULL);
> -    setup_timer(&rng);
> -
>      if (verbose) {
> -        g_info("period_ms: %d tv_sec: %ld tv_nsec: %lu\n",
> -               period_ms, rng.ts.it_value.tv_sec, rng.ts.it_value.tv_nsec);
> +        g_log_set_handler(NULL, G_LOG_LEVEL_MASK, g_log_default_handler, NULL);
> +        g_setenv("G_MESSAGES_DEBUG", "all", true);
> +    } else {
> +        g_log_set_handler(NULL,
> +                          G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR,
> +                          g_log_default_handler, NULL);
>      }
>  
> +    rng.quota_remaining = max_bytes;
> +    rng.timer = g_timeout_add(period_ms, check_rate_limit, &rng);
> +    g_info("period_ms: %"PRId32", timer %d\n", period_ms, rng.timer);
> +
>      g_message("entering main loop, awaiting messages");
>      g_main_loop_run(rng.loop);
>      g_message("finished main loop, cleaning up");
>  
>      g_main_loop_unref(rng.loop);
>      vug_deinit(&rng.dev);
> -    timer_delete(rng.rate_limit_timer);
>      close(source_fd);
>      unlink(socket_path);
>  }
> --8<---------------cut here---------------end--------------->8---
> 
> -- 
> Alex Bennée


