From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 09/50] target-lm32: make cpu-qom.h not target specific
Date: Mon, 16 May 2016 17:35:41 +0200

Make LM32CPU an opaque type within cpu-qom.h, and move all definitions of
private methods, as well as all type definitions that require knowledge
of the layout, to cpu.h.  This helps make files independent of NEED_CPU_H
when they only need to pass around CPU pointers.
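
The split follows the usual C opaque-type pattern; roughly, using hypothetical
foo-* header and symbol names in place of the real QEMU ones:

    /* foo-qom.h: the target-independent view.  Files that only pass CPU
     * pointers around include this; the struct layout is not visible here. */
    typedef struct FooCPU FooCPU;          /* opaque forward declaration */

    FooCPU *foo_cpu_lookup(int id);        /* hypothetical pointer-only API */

    /* foo.h: the target-specific view.  Only code that actually needs the
     * layout (and hence the NEED_CPU_H-style definitions) includes this. */
    #include "foo-qom.h"

    struct FooCPU {                        /* full definition lives here */
        int revision;
        unsigned int num_interrupts;
    };

    static inline unsigned int foo_cpu_irq_count(FooCPU *cpu)
    {
        return cpu->num_interrupts;        /* requires the full layout */
    }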

Signed-off-by: Paolo Bonzini <address@hidden>
---
 target-lm32/cpu-qom.h | 41 +----------------------------------------
 target-lm32/cpu.h     | 44 ++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 43 insertions(+), 42 deletions(-)

diff --git a/target-lm32/cpu-qom.h b/target-lm32/cpu-qom.h
index 54989e4..b423d25 100644
--- a/target-lm32/cpu-qom.h
+++ b/target-lm32/cpu-qom.h
@@ -47,45 +47,6 @@ typedef struct LM32CPUClass {
     void (*parent_reset)(CPUState *cpu);
 } LM32CPUClass;
 
-/**
- * LM32CPU:
- * @env: #CPULM32State
- *
- * A LatticeMico32 CPU.
- */
-typedef struct LM32CPU {
-    /*< private >*/
-    CPUState parent_obj;
-    /*< public >*/
-
-    CPULM32State env;
-
-    uint32_t revision;
-    uint8_t num_interrupts;
-    uint8_t num_breakpoints;
-    uint8_t num_watchpoints;
-    uint32_t features;
-} LM32CPU;
-
-static inline LM32CPU *lm32_env_get_cpu(CPULM32State *env)
-{
-    return container_of(env, LM32CPU, env);
-}
-
-#define ENV_GET_CPU(e) CPU(lm32_env_get_cpu(e))
-
-#define ENV_OFFSET offsetof(LM32CPU, env)
-
-#ifndef CONFIG_USER_ONLY
-extern const struct VMStateDescription vmstate_lm32_cpu;
-#endif
-
-void lm32_cpu_do_interrupt(CPUState *cpu);
-bool lm32_cpu_exec_interrupt(CPUState *cs, int int_req);
-void lm32_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
-                         int flags);
-hwaddr lm32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
-int lm32_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
-int lm32_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+typedef struct LM32CPU LM32CPU;
 
 #endif
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index f220fc0..22ffa68 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -25,6 +25,7 @@
 #define CPUArchState struct CPULM32State
 
 #include "qemu-common.h"
+#include "cpu-qom.h"
 #include "exec/cpu-defs.h"
 struct CPULM32State;
 typedef struct CPULM32State CPULM32State;
@@ -180,6 +181,47 @@ struct CPULM32State {
 
 };
 
+/**
+ * LM32CPU:
+ * @env: #CPULM32State
+ *
+ * A LatticeMico32 CPU.
+ */
+struct LM32CPU {
+    /*< private >*/
+    CPUState parent_obj;
+    /*< public >*/
+
+    CPULM32State env;
+
+    uint32_t revision;
+    uint8_t num_interrupts;
+    uint8_t num_breakpoints;
+    uint8_t num_watchpoints;
+    uint32_t features;
+};
+
+static inline LM32CPU *lm32_env_get_cpu(CPULM32State *env)
+{
+    return container_of(env, LM32CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(lm32_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(LM32CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_lm32_cpu;
+#endif
+
+void lm32_cpu_do_interrupt(CPUState *cpu);
+bool lm32_cpu_exec_interrupt(CPUState *cs, int int_req);
+void lm32_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
+hwaddr lm32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int lm32_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int lm32_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
 typedef enum {
     LM32_WP_DISABLED = 0,
     LM32_WP_READ,
@@ -193,8 +235,6 @@ static inline lm32_wp_t lm32_wp_type(uint32_t dc, int idx)
     return (dc >> (idx+1)*2) & 0x3;
 }
 
-#include "cpu-qom.h"
-
 LM32CPU *cpu_lm32_init(const char *cpu_model);
 int cpu_lm32_exec(CPUState *cpu);
 /* you can call this signal handler from your SIGBUS and SIGSEGV
-- 
1.8.3.1
