
Re: [Qemu-devel] [v1 PATCH 2/3]: Helper routines to use GLib threadpool


From: Arun R Bharadwaj
Subject: Re: [Qemu-devel] [v1 PATCH 2/3]: Helper routines to use GLib threadpool infrastructure in 9pfs.
Date: Tue, 15 Mar 2011 21:00:03 +0530
User-agent: Mutt/1.5.20 (2009-06-14)

* Harsh Bora <address@hidden> [2011-03-15 17:15:48]:

> On 03/15/2011 04:08 PM, Arun R Bharadwaj wrote:
> >* Arun R Bharadwaj <address@hidden> [2011-03-15 16:04:53]:
> >
> >Author: Arun R Bharadwaj <address@hidden>
> >Date:   Thu Mar 10 15:11:49 2011 +0530
> >
> >     Helper routines to use GLib threadpool infrastructure in 9pfs.
> >
> >     This patch creates helper routines to make use of the
> >     threadpool infrastructure provided by GLib. This is based
> >     on the prototype patch by Anthony which does a similar thing
> >     for posix-aio-compat.c
> >
> >     An example use case is provided in the next patch where one
> >     of the syscalls in 9pfs is converted into the threaded model
> >     using these helper routines.
> >
> >     Signed-off-by: Arun R Bharadwaj <address@hidden>
> >     Reviewed-by: Aneesh Kumar K.V <address@hidden>
> >
> >diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> >index dceefd5..cf61345 100644
> >--- a/hw/9pfs/virtio-9p.c
> >+++ b/hw/9pfs/virtio-9p.c
> >@@ -18,6 +18,8 @@
> >  #include "fsdev/qemu-fsdev.h"
> >  #include "virtio-9p-debug.h"
> >  #include "virtio-9p-xattr.h"
> >+#include "signal.h"
> >+#include "qemu-thread.h"
> >
> >  int debug_9p_pdu;
> >  static void v9fs_reclaim_fd(V9fsState *s);
> >@@ -36,6 +38,89 @@ enum {
> >      Oappend = 0x80,
> >  };
> >
> >+typedef struct V9fsPool {
> >+    GThreadPool *pool;
> >+    GList *requests;
> >+    int rfd;
> >+    int wfd;
> >+} V9fsPool;
> >+
> >+static V9fsPool v9fs_pool;
> >+
> >+static void v9fs_qemu_submit_request(V9fsRequest *req)
> >+{
> >+    V9fsPool *p = &v9fs_pool;
> >+
> >+    p->requests = g_list_append(p->requests, req);
> >+    g_thread_pool_push(v9fs_pool.pool, req, NULL);
> >+}
> >+
> >+static void die2(int err, const char *what)
> >+{
> >+    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
> >+    abort();
> >+}
> >+
> >+static void die(const char *what)
> >+{
> >+    die2(errno, what);
> >+}
> >+
> >+static void v9fs_qemu_process_post_ops(void *arg)
> >+{
> >+    struct V9fsPool *p = &v9fs_pool;
> >+    struct V9fsPostOp *post_op;
> >+    char byte;
> >+    ssize_t len;
> >+    GList *cur_req, *next_req;
> >+
> >+    do {
> >+        len = read(p->rfd, &byte, sizeof(byte));
> >+    } while (len == -1 && errno == EINTR);
> >+
> >+    for (cur_req = p->requests; cur_req != NULL; cur_req = next_req) {
> >+        V9fsRequest *req = cur_req->data;
> >+        next_req = g_list_next(cur_req);
> >+
> >+        if (!req->done) {
> >+            continue;
> >+        }
> >+
> >+        post_op = &req->post_op;
> >+        post_op->func(post_op->arg);
> >+        p->requests = g_list_remove_link(p->requests, cur_req);
> >+        g_list_free(p->requests);
> >+    }
> >+}
> >+
> >+static inline void v9fs_thread_signal(void)
> >+{
> >+    struct V9fsPool *p = &v9fs_pool;
> >+    char byte = 0;
> >+    ssize_t ret;
> >+
> >+    do {
> >+        ret = write(p->wfd, &byte, sizeof(byte));
> >+    } while (ret == -1 && errno == EINTR);
> >+
> >+    if (ret < 0 && errno != EAGAIN) {
> >+        die("write() in v9fs");
> >+    }
> >+
> >+    if (kill(getpid(), SIGUSR2)) {
> 
> Not sure whether pthread_kill or qemu_thread_signal would be the
> better choice to go with here?
> 

Please check the other replies. Looks like we don't really need
signalling.
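
For reference, a minimal sketch of what the signal-free variant might look
like, given that this patch already registers v9fs_qemu_process_post_ops()
as the read handler for p->rfd via qemu_set_fd_handler(): the write() to
the pipe is enough to wake the main loop, so the kill(getpid(), SIGUSR2)
in the quoted hunk becomes unnecessary (hypothetical variant, not part of
the posted patch):

    static inline void v9fs_thread_signal(void)
    {
        struct V9fsPool *p = &v9fs_pool;
        char byte = 0;
        ssize_t ret;

        do {
            ret = write(p->wfd, &byte, sizeof(byte));
        } while (ret == -1 && errno == EINTR);

        if (ret < 0 && errno != EAGAIN) {
            die("write() in v9fs");
        }
        /* No kill()/pthread_kill() needed: the main loop polls p->rfd
         * and calls v9fs_qemu_process_post_ops() once it sees the byte
         * written above. */
    }
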

> >+        die("kill failed");
> >+    }
> >+}
> >+
> >+static void v9fs_thread_routine(gpointer data, gpointer user_data)
> >+{
> >+    V9fsRequest *req = data;
> >+
> >+    req->func(req);
> >+    v9fs_thread_signal();
> >+    req->done = 1;
> 
> Shouldn't these be in the reverse order, setting the flag first and
> then signalling:
>     req->done = 1;
>     v9fs_thread_signal();
> 
> >+}
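
A sketch of v9fs_thread_routine() with the ordering Harsh suggests, so that
the post-op handler on the main loop never sees the wakeup before the done
flag is set (illustrative only; it simply applies the suggestion above):

    static void v9fs_thread_routine(gpointer data, gpointer user_data)
    {
        V9fsRequest *req = data;

        req->func(req);
        req->done = 1;          /* publish completion first ...           */
        v9fs_thread_signal();   /* ... then wake the main loop so that    */
                                /* v9fs_qemu_process_post_ops() runs it   */
    }
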
> >+
> >  static int omode_to_uflags(int8_t mode)
> >  {
> >      int ret = 0;
> >@@ -3850,7 +3935,8 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >      int i, len;
> >      struct stat stat;
> >      FsTypeEntry *fse;
> >-
> >+    int fds[2];
> >+    V9fsPool *p = &v9fs_pool;
> >
> >      s = (V9fsState *)virtio_common_init("virtio-9p",
> >                                      VIRTIO_ID_9P,
> >@@ -3939,5 +4025,21 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
> >                          s->tag_len;
> >      s->vdev.get_config = virtio_9p_get_config;
> >
> >+    if (qemu_pipe(fds) == -1) {
> >+        fprintf(stderr, "failed to create fd's for virtio-9p\n");
> >+        exit(1);
> >+    }
> >+
> >+    p->pool = g_thread_pool_new(v9fs_thread_routine, p, 8, FALSE, NULL);
> >+    p->rfd = fds[0];
> >+    p->wfd = fds[1];
> >+
> >+    fcntl(p->rfd, F_SETFL, O_NONBLOCK);
> >+    fcntl(p->wfd, F_SETFL, O_NONBLOCK);
> >+
> >+    qemu_set_fd_handler(p->rfd, v9fs_qemu_process_post_ops, NULL, NULL);
> >+
> >+    (void) v9fs_qemu_submit_request;
> 
> Do we really need it ^ ?
> 

Yes. Until v9fs_qemu_submit_request() is actually used in the next
patch, we would otherwise get a "v9fs_qemu_submit_request defined but
not used" compile error.
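
For reference, a common alternative to the "(void) v9fs_qemu_submit_request;"
statement is to annotate the function itself as unused until the follow-up
patch adds a caller; a sketch assuming GCC (GLib spells the same attribute
G_GNUC_UNUSED), and not what this patch actually does:

    /* Keeps the "defined but not used" warning quiet until patch 3/3
     * starts calling v9fs_qemu_submit_request(). */
    static __attribute__((unused)) void v9fs_qemu_submit_request(V9fsRequest *req)
    {
        V9fsPool *p = &v9fs_pool;

        p->requests = g_list_append(p->requests, req);
        g_thread_pool_push(v9fs_pool.pool, req, NULL);
    }
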

> - Harsh
> 
> >+
> >      return&s->vdev;
> >  }
> >diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
> >index 10809ba..e7d2326 100644
> >--- a/hw/9pfs/virtio-9p.h
> >+++ b/hw/9pfs/virtio-9p.h
> >@@ -124,6 +124,20 @@ struct V9fsPDU
> >      QLIST_ENTRY(V9fsPDU) next;
> >  };
> >
> >+typedef struct V9fsPostOp {
> >+    /* Post Operation routine to execute after executing syscall */
> >+    void (*func)(void *arg);
> >+    void *arg;
> >+} V9fsPostOp;
> >+
> >+typedef struct V9fsRequest {
> >+    void (*func)(struct V9fsRequest *req);
> >+
> >+    /* Flag to indicate that request is satisfied, ready for post-processing */
> >+    int done;
> >+
> >+    V9fsPostOp post_op;
> >+} V9fsRequest;
> >
> >  /* FIXME
> >   * 1) change user needs to set groups and stuff
> >
> >
> 
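
For context, a minimal sketch of how a blocking call could be wrapped with
these helpers, along the lines hinted at in the commit message. Every
v9fs_dummy_* name below is hypothetical and not taken from the follow-up
patch, and the sketch assumes the usual virtio-9p.c context (QEMU's
container_of(), qemu_mallocz(), qemu_strdup() and qemu_free() helpers,
plus <sys/stat.h> for lstat()):

    typedef struct V9fsDummyState {
        V9fsRequest request;      /* must stay alive until the post op runs */
        char *path;
        struct stat stbuf;
        int err;
    } V9fsDummyState;

    static void v9fs_dummy_worker(V9fsRequest *req)
    {
        /* Runs on a GThreadPool worker thread: blocking here is fine. */
        V9fsDummyState *vs = container_of(req, V9fsDummyState, request);

        vs->err = (lstat(vs->path, &vs->stbuf) < 0) ? -errno : 0;
    }

    static void v9fs_dummy_post_op(void *arg)
    {
        /* Runs back on the main loop, via v9fs_qemu_process_post_ops(). */
        V9fsDummyState *vs = arg;

        /* ... complete the 9P reply here using vs->stbuf / vs->err ... */
        qemu_free(vs->path);
        qemu_free(vs);
    }

    static void v9fs_dummy_submit(const char *path)
    {
        V9fsDummyState *vs = qemu_mallocz(sizeof(*vs));

        vs->path = qemu_strdup(path);
        vs->request.func = v9fs_dummy_worker;
        vs->request.done = 0;
        vs->request.post_op.func = v9fs_dummy_post_op;
        vs->request.post_op.arg = vs;
        v9fs_qemu_submit_request(&vs->request);
    }
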


