[Qemu-devel] Re: [patch 2/7] qemu: separate thread for io


From: Anthony Liguori
Subject: [Qemu-devel] Re: [patch 2/7] qemu: separate thread for io
Date: Fri, 20 Mar 2009 12:43:49 -0500
User-agent: Thunderbird 2.0.0.19 (X11/20090105)

address@hidden wrote:
Index: qemu/exec.c
===================================================================
--- qemu.orig/exec.c
+++ qemu/exec.c
@@ -1513,6 +1513,20 @@ void cpu_interrupt(CPUState *env, int ma
     /* FIXME: This is probably not threadsafe.  A different thread could
        be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
+
+    switch(mask) {
+    case CPU_INTERRUPT_HARD:
+    case CPU_INTERRUPT_SMI:
+    case CPU_INTERRUPT_NMI:
+    case CPU_INTERRUPT_EXIT:
+    /*
+     * only unlink the TB's if we're called from cpu thread context,
+     * otherwise signal cpu thread to do it.
+     */
+        if (qemu_notify_event(env))
+               return;
+    }
+
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
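(Since only the prototype shows up in the qemu-common.h hunk below, for anyone reading along: I assume qemu_notify_event() checks whether we're already in cpu thread context and otherwise kicks the cpu thread with the SIG_IPI introduced in vl.c. A minimal sketch of that shape, using raw pthreads rather than this series' QemuThread wrappers -- cpus_thread_id is a name I'm making up, mirroring the cpus_thread global added in vl.c:

/* Hypothetical sketch, not the actual patch.  Returns 0 when we are
 * already on the cpu thread (so cpu_interrupt() unlinks the TBs itself),
 * nonzero after signalling the cpu thread to do the unlinking. */
#include <pthread.h>
#include <signal.h>

#define SIG_IPI (SIGRTMIN+4)            /* matches the vl.c hunk */

extern pthread_t cpus_thread_id;        /* assumed: recorded when the cpu thread starts */

int qemu_notify_event(void *env)
{
    if (pthread_equal(pthread_self(), cpus_thread_id))
        return 0;                       /* cpu thread context: unlink TBs inline */
    pthread_kill(cpus_thread_id, SIG_IPI);  /* kick the cpu thread out of TCG */
    return 1;
}
)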
Index: qemu/qemu-common.h
===================================================================
--- qemu.orig/qemu-common.h
+++ qemu/qemu-common.h
@@ -190,6 +190,8 @@ int cpu_load(QEMUFile *f, void *opaque,
 /* Force QEMU to stop what it's doing and service IO */
 void qemu_service_io(void);
+void main_loop_break(void);
+int qemu_notify_event(void *env);
typedef struct QEMUIOVector {
     struct iovec *iov;
Index: qemu/vl.c
===================================================================
--- qemu.orig/vl.c
+++ qemu/vl.c
@@ -155,6 +155,7 @@
 //#define DEBUG_NET
 //#define DEBUG_SLIRP
+#define SIG_IPI (SIGRTMIN+4)
 
 #ifdef DEBUG_IOPORT
 #  define LOG_IOPORT(...) qemu_log_mask(CPU_LOG_IOPORT, ## __VA_ARGS__)
@@ -265,6 +266,10 @@ static QEMUTimer *nographic_timer;
 uint8_t qemu_uuid[16];
QemuMutex qemu_global_mutex;
+QemuMutex qemu_fair_mutex;
+
+QemuThread io_thread;
+QemuThread cpus_thread;
/***********************************************************/
 /* x86 ISA bus support */
@@ -1328,7 +1333,6 @@ static void host_alarm_handler(int host_
                                qemu_get_clock(vm_clock))) ||
         qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME],
                            qemu_get_clock(rt_clock))) {
-        CPUState *env = next_cpu;
#ifdef _WIN32
         struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv;
@@ -1339,15 +1343,6 @@ static void host_alarm_handler(int host_
 #endif
         alarm_timer->flags |= ALARM_FLAG_EXPIRED;
-        if (env) {
-            /* stop the currently executing cpu because a timer occured */
-            cpu_interrupt(env, CPU_INTERRUPT_EXIT);
-#ifdef USE_KQEMU
-            if (env->kqemu_enabled) {
-                kqemu_cpu_interrupt(env);
-            }
-#endif
-        }
         event_pending = 1;
     }
 }
@@ -2878,6 +2873,7 @@ int qemu_set_fd_handler2(int fd,
         ioh->opaque = opaque;
         ioh->deleted = 0;
     }
+    main_loop_break();
     return 0;
 }
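(main_loop_break() isn't in the quoted hunks either; I assume it just wakes the io thread's select() so a newly registered fd handler takes effect immediately. The usual shape is a self-pipe whose read end is always in rfds, something like this -- io_thread_fds is my name, not the patch's:

/* Hypothetical sketch: wake up select() in the io thread by writing a
 * byte to a pipe whose read end is permanently part of the fd set. */
#include <unistd.h>

static int io_thread_fds[2];    /* assumed: pipe() created at startup,
                                   io_thread_fds[0] added to rfds */

void main_loop_break(void)
{
    char byte = 0;
    /* if the pipe is already full, the io thread is waking up anyway */
    write(io_thread_fds[1], &byte, sizeof(byte));
}
)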
@@ -3334,6 +3330,7 @@ void qemu_bh_schedule(QEMUBH *bh)
     if (env) {
         cpu_interrupt(env, CPU_INTERRUPT_EXIT);
     }
+    main_loop_break();
 }
void qemu_bh_cancel(QEMUBH *bh)
@@ -3611,6 +3608,155 @@ static void host_main_loop_wait(int *tim
 }
 #endif
+static int wait_signal(int timeout)
+{
+    struct timespec ts;
+    sigset_t waitset;
+
+    ts.tv_sec = timeout / 1000;
+    ts.tv_nsec = (timeout % 1000) * 1000000;
+    sigemptyset(&waitset);
+    sigaddset(&waitset, SIGUSR1);
+
+    return sigtimedwait(&waitset, NULL, &ts);
+}
+
+static int has_work(CPUState *env)
+{
+    int r = 0;
+    if (!env->halted)
+        r = 1;
+    if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
+        r = 1;
+    return r;
+}
+
+static void qemu_wait_io_event(CPUState *env, int timeout)
+{
+    qemu_mutex_unlock(&qemu_global_mutex);
+
+    if (timeout && !has_work(env))
+        wait_signal(timeout);
+    /*
+     * Users of qemu_global_mutex can be starved, having no chance
+     * to acquire it since this path will get to it first.
+     * So use another lock to provide fairness.
+     */
+    qemu_mutex_lock(&qemu_fair_mutex);
+    qemu_mutex_unlock(&qemu_fair_mutex);

This is extremely subtle. I think it can be made a bit more explicit via something like:

int generation = qemu_io_generation() + 1;

qemu_mutex_unlock(&qemu_global_mutex);

if (timeout && !has_work(env))
    wait_signal(timeout);

qemu_mutex_lock(&qemu_global_mutex);

while (qemu_io_generation() < generation)
    qemu_cond_wait(&qemu_io_generation_cond, &qemu_global_mutex);
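To be clear, qemu_io_generation(), qemu_io_generation_add() and qemu_io_generation_cond don't exist yet; they're names I'm inventing for this suggestion. Since every access would happen with qemu_global_mutex held, a plain counter is all they'd need:

/* Sketch of the suggested helpers.  All accesses happen under
 * qemu_global_mutex, so no extra locking is needed. */
static int io_generation;
QemuCond qemu_io_generation_cond;

int qemu_io_generation(void)
{
    return io_generation;
}

void qemu_io_generation_add(void)
{
    io_generation++;        /* io thread, qemu_global_mutex held */
}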

Then, in main_loop_wait():

 void main_loop_wait(int timeout)
 {
     IOHandlerRecord *ioh;
@@ -3660,7 +3806,7 @@ void main_loop_wait(int timeout)
      */
     qemu_mutex_unlock(&qemu_global_mutex);
     ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
-    qemu_mutex_lock(&qemu_global_mutex);

qemu_mutex_lock(&qemu_global_mutex);
qemu_io_generation_add();
qemu_cond_signal(&qemu_io_generation_cond);

This will ensure that the TCG loop backs off of qemu_global_mutex for at least one main_loop_wait() iteration.

Regards,

Anthony Liguori




