From: Paolo Bonzini
Subject: [Qemu-devel] [PULL 15/28] cpus-common: move CPU list management to common code
Date: Mon, 26 Sep 2016 15:40:45 +0200

Add a mutex for the CPU list to system emulation, as it will be used to
manage safe work.  Abstract manipulation of the CPU list in new functions
cpu_list_add and cpu_list_remove.
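
For reference, the intended call pattern is roughly the sketch below (C, against
the QEMU-internal headers touched by this patch; not a standalone program).
qemu_init_cpu_list() runs once per process before any CPU is created, and each
CPU is registered and unregistered through the new helpers instead of touching
the cpus tail queue directly.  The wrapper names my_emulator_startup(),
my_cpu_hotplug() and my_cpu_unplug() are hypothetical and used only for
illustration; the real callers are cpu_exec_init()/cpu_exec_exit() in exec.c
and the main() hunks below.

    /* Illustrative sketch only; builds only inside the QEMU tree. */
    #include "qemu/osdep.h"
    #include "qom/cpu.h"
    #include "exec/cpu-common.h"

    static void my_emulator_startup(void)
    {
        /* Once per process, before any CPU is created and before any
         * cpu_list_lock()/cpu_list_unlock() call.  */
        qemu_init_cpu_list();
    }

    static void my_cpu_hotplug(CPUState *cpu)
    {
        /* Assigns cpu->cpu_index (if still UNASSIGNED_CPU_INDEX) and links
         * the CPU into the global list under qemu_cpu_list_lock.  */
        cpu_list_add(cpu);
    }

    static void my_cpu_unplug(CPUState *cpu)
    {
        /* Safe even if the CPU was never added: the helper checks
         * QTAILQ_IN_USE() before unlinking and resets cpu_index.  */
        cpu_list_remove(cpu);
    }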

Reviewed-by: Richard Henderson <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
---
 Makefile.objs             |  2 +-
 bsd-user/main.c           |  9 +----
 cpus-common.c             | 83 +++++++++++++++++++++++++++++++++++++++++++++++
 exec.c                    | 37 ++-------------------
 include/exec/cpu-common.h |  5 +++
 include/exec/exec-all.h   | 11 -------
 include/qom/cpu.h         | 12 +++++++
 linux-user/main.c         | 17 +++-------
 vl.c                      |  1 +
 9 files changed, 109 insertions(+), 68 deletions(-)
 create mode 100644 cpus-common.c

diff --git a/Makefile.objs b/Makefile.objs
index 7301544..a8e0224 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -89,7 +89,7 @@ endif
 
 #######################################################################
 # Target-independent parts used in system and user emulation
-common-obj-y += tcg-runtime.o
+common-obj-y += tcg-runtime.o cpus-common.o
 common-obj-y += hw/
 common-obj-y += qom/
 common-obj-y += disas/
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 0fb08e4..591c424 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -95,14 +95,6 @@ void fork_end(int child)
     }
 }
 
-void cpu_list_lock(void)
-{
-}
-
-void cpu_list_unlock(void)
-{
-}
-
 #ifdef TARGET_I386
 /***********************************************************/
 /* CPUX86 core interface */
@@ -748,6 +740,7 @@ int main(int argc, char **argv)
     if (argc <= 1)
         usage();
 
+    qemu_init_cpu_list();
     module_call_init(MODULE_INIT_QOM);
 
     if ((envlist = envlist_create()) == NULL) {
diff --git a/cpus-common.c b/cpus-common.c
new file mode 100644
index 0000000..fda3848
--- /dev/null
+++ b/cpus-common.c
@@ -0,0 +1,83 @@
+/*
+ * CPU thread main loop - common bits for user and system mode emulation
+ *
+ *  Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "exec/cpu-common.h"
+#include "qom/cpu.h"
+#include "sysemu/cpus.h"
+
+static QemuMutex qemu_cpu_list_lock;
+
+void qemu_init_cpu_list(void)
+{
+    qemu_mutex_init(&qemu_cpu_list_lock);
+}
+
+void cpu_list_lock(void)
+{
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+}
+
+void cpu_list_unlock(void)
+{
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
+static bool cpu_index_auto_assigned;
+
+static int cpu_get_free_index(void)
+{
+    CPUState *some_cpu;
+    int cpu_index = 0;
+
+    cpu_index_auto_assigned = true;
+    CPU_FOREACH(some_cpu) {
+        cpu_index++;
+    }
+    return cpu_index;
+}
+
+void cpu_list_add(CPUState *cpu)
+{
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
+        cpu->cpu_index = cpu_get_free_index();
+        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
+    } else {
+        assert(!cpu_index_auto_assigned);
+    }
+    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
+void cpu_list_remove(CPUState *cpu)
+{
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+    if (!QTAILQ_IN_USE(cpu, node)) {
+        /* there is nothing to undo since cpu_exec_init() hasn't been called */
+        qemu_mutex_unlock(&qemu_cpu_list_lock);
+        return;
+    }
+
+    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));
+
+    QTAILQ_REMOVE(&cpus, cpu, node);
+    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
diff --git a/exec.c b/exec.c
index c81d5ab..c8389f9 100644
--- a/exec.c
+++ b/exec.c
@@ -598,36 +598,11 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
 }
 #endif
 
-static bool cpu_index_auto_assigned;
-
-static int cpu_get_free_index(void)
-{
-    CPUState *some_cpu;
-    int cpu_index = 0;
-
-    cpu_index_auto_assigned = true;
-    CPU_FOREACH(some_cpu) {
-        cpu_index++;
-    }
-    return cpu_index;
-}
-
 void cpu_exec_exit(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    cpu_list_lock();
-    if (!QTAILQ_IN_USE(cpu, node)) {
-        /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        cpu_list_unlock();
-        return;
-    }
-
-    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));
-
-    QTAILQ_REMOVE(&cpus, cpu, node);
-    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    cpu_list_unlock();
+    cpu_list_remove(cpu);
 
     if (cc->vmsd != NULL) {
         vmstate_unregister(NULL, cc->vmsd, cpu);
@@ -663,15 +638,7 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
     object_ref(OBJECT(cpu->memory));
 #endif
 
-    cpu_list_lock();
-    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
-        cpu->cpu_index = cpu_get_free_index();
-        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
-    } else {
-        assert(!cpu_index_auto_assigned);
-    }
-    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
-    cpu_list_unlock();
+    cpu_list_add(cpu);
 
 #ifndef CONFIG_USER_ONLY
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 952bcfe..869ba41 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -23,6 +23,11 @@ typedef struct CPUListState {
     FILE *file;
 } CPUListState;
 
+/* The CPU list lock nests outside tb_lock/tb_unlock.  */
+void qemu_init_cpu_list(void);
+void cpu_list_lock(void);
+void cpu_list_unlock(void);
+
 #if !defined(CONFIG_USER_ONLY)
 
 enum device_endian {
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 008e09a..336a57c 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -56,17 +56,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               uint32_t flags,
                               int cflags);
-#if defined(CONFIG_USER_ONLY)
-void cpu_list_lock(void);
-void cpu_list_unlock(void);
-#else
-static inline void cpu_list_unlock(void)
-{
-}
-static inline void cpu_list_lock(void)
-{
-}
-#endif
 
 void cpu_exec_init(CPUState *cpu, Error **errp);
 void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 4aa9e61..ea3233f 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -545,6 +545,18 @@ static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 #endif
 
 /**
+ * cpu_list_add:
+ * @cpu: The CPU to be added to the list of CPUs.
+ */
+void cpu_list_add(CPUState *cpu);
+
+/**
+ * cpu_list_remove:
+ * @cpu: The CPU to be removed from the list of CPUs.
+ */
+void cpu_list_remove(CPUState *cpu);
+
+/**
  * cpu_reset:
  * @cpu: The CPU whose state is to be reset.
  */
diff --git a/linux-user/main.c b/linux-user/main.c
index 6e14010..719f046 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,7 +111,6 @@ int cpu_get_pic_interrupt(CPUX86State *env)
    We don't require a full sync, only that no cpus are executing guest code.
    The alternative is to map target atomic ops onto host equivalents,
    which requires quite a lot of per host/target work.  */
-static QemuMutex cpu_list_lock;
 static QemuMutex exclusive_lock;
 static QemuCond exclusive_cond;
 static QemuCond exclusive_resume;
@@ -119,7 +118,6 @@ static int pending_cpus;
 
 void qemu_init_cpu_loop(void)
 {
-    qemu_mutex_init(&cpu_list_lock);
     qemu_mutex_init(&exclusive_lock);
     qemu_cond_init(&exclusive_cond);
     qemu_cond_init(&exclusive_resume);
@@ -128,6 +126,7 @@ void qemu_init_cpu_loop(void)
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
+    cpu_list_lock();
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
@@ -147,14 +146,15 @@ void fork_end(int child)
         }
         pending_cpus = 0;
         qemu_mutex_init(&exclusive_lock);
-        qemu_mutex_init(&cpu_list_lock);
         qemu_cond_init(&exclusive_cond);
         qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_init_cpu_list();
         gdbserver_fork(thread_cpu);
     } else {
         qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        cpu_list_unlock();
     }
 }
 
@@ -221,16 +221,6 @@ static inline void cpu_exec_end(CPUState *cpu)
     qemu_mutex_unlock(&exclusive_lock);
 }
 
-void cpu_list_lock(void)
-{
-    qemu_mutex_lock(&cpu_list_lock);
-}
-
-void cpu_list_unlock(void)
-{
-    qemu_mutex_unlock(&cpu_list_lock);
-}
-
 
 #ifdef TARGET_I386
 /***********************************************************/
@@ -4229,6 +4219,7 @@ int main(int argc, char **argv, char **envp)
     int ret;
     int execfd;
 
+    qemu_init_cpu_list();
     qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);
 
diff --git a/vl.c b/vl.c
index 215a6f9..eda83fa 100644
--- a/vl.c
+++ b/vl.c
@@ -3017,6 +3017,7 @@ int main(int argc, char **argv, char **envp)
     Error *err = NULL;
     bool list_data_dirs = false;
 
+    qemu_init_cpu_list();
     qemu_init_cpu_loop();
     qemu_mutex_lock_iothread();
 
-- 
2.7.4




