qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC] thread-pool: Add option to fix the pool size


From: Nicolas Saenz Julienne
Subject: [RFC] thread-pool: Add option to fix the pool size
Date: Wed, 2 Feb 2022 18:52:34 +0100

The thread pool regulates itself: when idle, it kills threads until
empty, when in demand, it creates new threads until full. This behaviour
doesn't play well with latency sensitive workloads where the price of
creating a new thread is too high. For example, when paired with qemu's
'-mlock', or using safety features like SafeStack, creating a new thread
has been measured to take multiple milliseconds.

In order to mitigate this let's introduce a new option to set a fixed
pool size. The threads will be created during the pool's initialization,
remain available during its lifetime regardless of demand, and destroyed
upon freeing it. A properly characterized workload will then be able to
configure the pool to avoid any latency spike.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>

---

The fix I propose here works for my specific use-case, but I'm pretty
sure it'll need to be a bit more versatile to accommodate other
use-cases.

Some questions:

- Is uniformly setting these parameters for every pool instance too
  limiting? It'd make sense to move the options into the AioContext the
  pool belongs to. IIUC, for the general block use-case, this would be
  'qemu_aio_context' as initialized in qemu_init_main_loop().

- Currently I'm setting two pool properties through a single qemu
  option: the pool's size and its dynamic behaviour (or lack thereof). I
  think it'd be better to split them into separate options. I thought of
  different ways of expressing this (min/max-size where static happens
  when min-size=max-size, size and static/dynamic, etc..), but you might
  have ideas on what could be useful to other use-cases.

Some background on my workload: I'm using IDE emulation, the guest is an
old RTOS that doesn't support virtio, using 'aio=native' isn't possible
either (unaligned IO accesses).

Thanks!

 include/block/thread-pool.h |  2 ++
 qemu-options.hx             | 21 +++++++++++++++++++++
 softmmu/vl.c                | 28 ++++++++++++++++++++++++++++
 util/thread-pool.c          | 20 +++++++++++++++++---
 4 files changed, 68 insertions(+), 3 deletions(-)

diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
index 7dd7d730a0..3337971669 100644
--- a/include/block/thread-pool.h
+++ b/include/block/thread-pool.h
@@ -23,6 +23,8 @@
 typedef int ThreadPoolFunc(void *opaque);
 
 typedef struct ThreadPool ThreadPool;
+extern int thread_pool_max_threads;
+extern bool thread_pool_fixed_size;
 
 ThreadPool *thread_pool_new(struct AioContext *ctx);
 void thread_pool_free(ThreadPool *pool);
diff --git a/qemu-options.hx b/qemu-options.hx
index ba3ae6a42a..cb8f50db66 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -4627,6 +4627,27 @@ SRST
     user-provided config files on sysconfdir.
 ERST
 
+DEF("thread-pool", HAS_ARG, QEMU_OPTION_threadpool,
+    "-thread-pool fixed-size=[n]\n"
+    "               Sets the number of threads always available in the pool.\n",
+    QEMU_ARCH_ALL)
+SRST
+``-thread-pool fixed-size=[n]``
+    The ``fixed-size=value`` option sets the number of readily available
+    threads in the pool. When set, the pool will create the threads during
+    initialization and will abstain from growing or shrinking during runtime.
+    This moves the burden of properly sizing the pool to the user in exchange
+    for a more deterministic thread pool behaviour. The number of threads has
+    to be greater than 0.
+
+    When not used, the thread pool size will change dynamically based on
+    demand: converging to being empty when idle and maxing out at 64 threads.
+
+    This option targets real-time systems sensitive to the latency introduced
+    by creating new threads during runtime. Performance sensitive use-cases are
+    better-off not using this.
+ERST
+
 DEF("trace", HAS_ARG, QEMU_OPTION_trace,
     "-trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
     "                specify tracing options\n",
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 5e1b35ba48..6a44cc1818 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -72,6 +72,7 @@
 #include "qemu/log.h"
 #include "sysemu/blockdev.h"
 #include "hw/block/block.h"
+#include "block/thread-pool.h"
 #include "hw/i386/x86.h"
 #include "hw/i386/pc.h"
 #include "migration/misc.h"
@@ -496,6 +497,19 @@ static QemuOptsList qemu_action_opts = {
     },
 };
 
+static QemuOptsList qemu_thread_pool_opts = {
+    .name = "thread-pool",
+    .head = QTAILQ_HEAD_INITIALIZER(qemu_thread_pool_opts.head),
+    .desc = {
+        {
+            .name = "fixed-size",
+            .type = QEMU_OPT_NUMBER,
+            .help = "Sets the number of threads available in the pool",
+        },
+        { /* end of list */ }
+    },
+};
+
 const char *qemu_get_vm_name(void)
 {
     return qemu_name;
@@ -2809,6 +2823,7 @@ void qemu_init(int argc, char **argv, char **envp)
     qemu_add_opts(&qemu_semihosting_config_opts);
     qemu_add_opts(&qemu_fw_cfg_opts);
     qemu_add_opts(&qemu_action_opts);
+    qemu_add_opts(&qemu_thread_pool_opts);
     module_call_init(MODULE_INIT_OPTS);
 
     error_init(argv[0]);
@@ -3658,6 +3673,19 @@ void qemu_init(int argc, char **argv, char **envp)
             case QEMU_OPTION_nouserconfig:
                /* Nothing to be parsed here. Especially, do not error out below. */
                 break;
+            case QEMU_OPTION_threadpool:
+                opts = qemu_opts_parse_noisily(qemu_find_opts("thread-pool"),
+                                               optarg, false);
+                if (!opts) {
+                    exit(1);
+                }
+                thread_pool_max_threads = qemu_opt_get_number(opts, "fixed-size", 0);
+                if (thread_pool_max_threads <= 0) {
+                    error_report("fixed-size is invalid");
+                    exit(1);
+                }
+                thread_pool_fixed_size = true;
+                break;
             default:
                 if (os_parse_cmd_args(popt->index, optarg)) {
                     error_report("Option not supported in this build");
diff --git a/util/thread-pool.c b/util/thread-pool.c
index d763cea505..3081f502ff 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -25,6 +25,8 @@
 static void do_spawn_thread(ThreadPool *pool);
 
 typedef struct ThreadPoolElement ThreadPoolElement;
+int thread_pool_max_threads = 64;
+bool thread_pool_fixed_size;
 
 enum ThreadState {
     THREAD_QUEUED,
@@ -59,6 +61,7 @@ struct ThreadPool {
     QemuCond worker_stopped;
     QemuSemaphore sem;
     int max_threads;
+    bool fixed_size;
     QEMUBH *new_thread_bh;
 
     /* The following variables are only accessed from one AioContext. */
@@ -83,12 +86,16 @@ static void *worker_thread(void *opaque)
 
     while (!pool->stopping) {
         ThreadPoolElement *req;
-        int ret;
+        int ret = 0;
 
         do {
             pool->idle_threads++;
             qemu_mutex_unlock(&pool->lock);
-            ret = qemu_sem_timedwait(&pool->sem, 10000);
+            if (pool->fixed_size) {
+                qemu_sem_wait(&pool->sem);
+            } else {
+                ret = qemu_sem_timedwait(&pool->sem, 10000);
+            }
             qemu_mutex_lock(&pool->lock);
             pool->idle_threads--;
         } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
@@ -306,11 +313,18 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->worker_stopped);
     qemu_sem_init(&pool->sem, 0);
-    pool->max_threads = 64;
+    pool->max_threads = thread_pool_max_threads;
+    pool->fixed_size = thread_pool_fixed_size;
     pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
 
     QLIST_INIT(&pool->head);
     QTAILQ_INIT(&pool->request_list);
+
+    for (int i = 0; pool->fixed_size && i < pool->max_threads; i++) {
+        qemu_mutex_lock(&pool->lock);
+        spawn_thread(pool);
+        qemu_mutex_unlock(&pool->lock);
+    }
 }
 
 ThreadPool *thread_pool_new(AioContext *ctx)
-- 
2.34.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]