From: Andreas Färber
Subject: [Qemu-devel] [PATCH qom-cpu v2 03/40] target-i386: Clean up ENV_GET_CPU() usage
Date: Mon, 10 Mar 2014 01:15:12 +0100

Commits fdfba1a298ae26dd44bcfdb0429314139a0bc55a,
f606604f1c10b60ef294f1b9b229426521a365e3 and
2c17449b3022ca9623c4a7e2a504a4150ac4ad30 added usages of the
ENV_GET_CPU() macro in target-specific code.

Use x86_env_get_cpu() or reuse an existing X86CPU variable instead.

Cc: Edgar E. Iglesias <address@hidden>
Cc: Peter Maydell <address@hidden>
Signed-off-by: Andreas Färber <address@hidden>
---
 target-i386/helper.c     |  2 +-
 target-i386/seg_helper.c |  2 +-
 target-i386/smm_helper.c |  2 +-
 target-i386/svm_helper.c | 16 +++++++++-------
 4 files changed, 12 insertions(+), 10 deletions(-)
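
For reference, the mechanical pattern applied throughout (a minimal
sketch; both forms are taken verbatim from the hunks below):

    /* Before: generic, non-type-checked macro in target-specific code */
    CPUState *cs = ENV_GET_CPU(env);

    /* After: x86_env_get_cpu() recovers the X86CPU that embeds this
     * CPUX86State; the QOM CPU() cast then yields the CPUState */
    CPUState *cs = CPU(x86_env_get_cpu(env));

    /* Where an X86CPU variable is already in scope (helper_rsm() below),
     * it is reused directly */
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);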

diff --git a/target-i386/helper.c b/target-i386/helper.c
index 55c0457..11c7219 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -515,7 +515,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                              int is_write1, int mmu_idx)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     uint64_t ptep, pte;
     target_ulong pde_addr, pte_addr;
     int error_code, is_dirty, prot, page_size, is_write, is_user;
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 959212b..9dda02d 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1131,7 +1131,7 @@ static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                             int error_code, int is_hw, int rm)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.event_inj));
 
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 71c64b2..35901c9 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -181,8 +181,8 @@ void do_smm_enter(X86CPU *cpu)
 
 void helper_rsm(CPUX86State *env)
 {
-    CPUState *cs = ENV_GET_CPU(env);
     X86CPU *cpu = x86_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
     target_ulong sm_state;
     int i, offset;
     uint32_t val;
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index b38d450..bc33e61 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -88,7 +88,8 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
 static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                 const SegmentCache *sc)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
     stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
              sc->selector);
     stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
@@ -102,7 +103,7 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
 static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                 SegmentCache *sc)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     unsigned int flags;
 
     sc->selector = lduw_phys(cs->as,
@@ -125,7 +126,7 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
 
 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
     uint32_t event_inj;
     uint32_t int_ctl;
@@ -365,7 +366,7 @@ void helper_vmmcall(CPUX86State *env)
 
 void helper_vmload(CPUX86State *env, int aflag)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@@ -405,7 +406,7 @@ void helper_vmload(CPUX86State *env, int aflag)
 
 void helper_vmsave(CPUX86State *env, int aflag)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
     target_ulong addr;
 
     cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@@ -486,7 +487,7 @@ void helper_invlpga(CPUX86State *env, int aflag)
 void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                       uint64_t param)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
 
     if (likely(!(env->hflags & HF_SVMI_MASK))) {
         return;
@@ -568,7 +569,8 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
 {
-    CPUState *cs = ENV_GET_CPU(env);
+    CPUState *cs = CPU(x86_env_get_cpu(env));
+
     if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
         /* FIXME: this should be read in at vmrun (faster this way?) */
         uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
-- 
1.8.4.5



