[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v10 20/73] i386: convert to cpu_halted
From: |
Robert Foley |
Subject: |
[PATCH v10 20/73] i386: convert to cpu_halted |
Date: |
Wed, 17 Jun 2020 17:01:38 -0400 |
From: "Emilio G. Cota" <cota@braap.org>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[RF: Converted new code in i386/hax-all.c to cpu_halted]
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
target/i386/cpu.c | 2 +-
target/i386/cpu.h | 2 +-
target/i386/hax-all.c | 6 +++---
target/i386/helper.c | 4 ++--
target/i386/hvf/hvf.c | 4 ++--
target/i386/hvf/x86hvf.c | 4 ++--
target/i386/kvm.c | 10 +++++-----
target/i386/misc_helper.c | 2 +-
target/i386/whpx-all.c | 6 +++---
9 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index b1b311baa2..4d3ab0f3a2 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -6084,7 +6084,7 @@ static void x86_cpu_reset(DeviceState *dev)
/* We hard-wire the BSP to the first CPU. */
apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
- s->halted = !cpu_is_bsp(cpu);
+ cpu_halted_set(s, !cpu_is_bsp(cpu));
if (kvm_enabled()) {
kvm_arch_reset_vcpu(cpu);
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 7d77efd9e4..c618b90568 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1880,7 +1880,7 @@ static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
sipi_vector << 12,
env->segs[R_CS].limit,
env->segs[R_CS].flags);
- cs->halted = 0;
+ cpu_halted_set(cs, 0);
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index c93bb23a44..acfb7a6e10 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -511,7 +511,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
- cpu->halted = 0;
+ cpu_halted_set(cpu, 0);
}
if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
@@ -529,7 +529,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
hax_vcpu_sync_state(env, 1);
}
- if (cpu->halted) {
+ if (cpu_halted(cpu)) {
/* If this vcpu is halted, we must not ask HAXM to run it. Instead, we
* break out of hax_smp_cpu_exec() as if this vcpu had executed HLT.
* That way, this vcpu thread will be trapped in qemu_wait_io_event(),
@@ -594,7 +594,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
!(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
/* hlt instruction with interrupt disabled is shutdown */
env->eflags |= IF_MASK;
- cpu->halted = 1;
+ cpu_halted_set(cpu, 1);
cpu->exception_index = EXCP_HLT;
ret = 1;
}
diff --git a/target/i386/helper.c b/target/i386/helper.c
index c3a6e4fabe..058de4073d 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -450,7 +450,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1,
(env->hflags >> HF_SMM_SHIFT) & 1,
- cs->halted);
+ cpu_halted(cs));
} else
#endif
{
@@ -477,7 +477,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
(env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
(env->a20_mask >> 20) & 1,
(env->hflags >> HF_SMM_SHIFT) & 1,
- cs->halted);
+ cpu_halted(cs));
}
for(i = 0; i < 6; i++) {
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index be016b951a..b3bd2285fa 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -709,7 +709,7 @@ int hvf_vcpu_exec(CPUState *cpu)
vmx_update_tpr(cpu);
qemu_mutex_unlock_iothread();
- if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
+ if (!cpu_is_bsp(X86_CPU(cpu)) && cpu_halted(cpu)) {
qemu_mutex_lock_iothread();
return EXCP_HLT;
}
@@ -742,7 +742,7 @@ int hvf_vcpu_exec(CPUState *cpu)
(env->eflags & IF_MASK))
&& !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
- cpu->halted = 1;
+ cpu_halted_set(cpu, 1);
ret = EXCP_HLT;
break;
}
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 5cbcb32ab6..c09cf160ef 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -446,7 +446,7 @@ int hvf_process_events(CPUState *cpu_state)
if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
- cpu_state->halted = 0;
+ cpu_halted_set(cpu_state, 0);
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
hvf_cpu_synchronize_state(cpu_state);
@@ -458,5 +458,5 @@ int hvf_process_events(CPUState *cpu_state)
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
}
- return cpu_state->halted;
+ return cpu_halted(cpu_state);
}
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index b3c13cb898..eda51904dd 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -3594,7 +3594,7 @@ static int kvm_get_mp_state(X86CPU *cpu)
}
env->mp_state = mp_state.mp_state;
if (kvm_irqchip_in_kernel()) {
- cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+ cpu_halted_set(cs, mp_state.mp_state == KVM_MP_STATE_HALTED);
}
return 0;
}
@@ -4152,7 +4152,7 @@ int kvm_arch_process_async_events(CPUState *cs)
kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
env->has_error_code = 0;
- cs->halted = 0;
+ cpu_halted_set(cs, 0);
if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
@@ -4175,7 +4175,7 @@ int kvm_arch_process_async_events(CPUState *cs)
if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
- cs->halted = 0;
+ cpu_halted_set(cs, 0);
}
if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
kvm_cpu_synchronize_state(cs);
@@ -4188,7 +4188,7 @@ int kvm_arch_process_async_events(CPUState *cs)
env->tpr_access_type);
}
- return cs->halted;
+ return cpu_halted(cs);
}
static int kvm_handle_halt(X86CPU *cpu)
@@ -4199,7 +4199,7 @@ static int kvm_handle_halt(X86CPU *cpu)
if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
!(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
- cs->halted = 1;
+ cpu_halted_set(cs, 1);
return EXCP_HLT;
}
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
index b6b1d41b14..c396b6c7b9 100644
--- a/target/i386/misc_helper.c
+++ b/target/i386/misc_helper.c
@@ -558,7 +558,7 @@ static void do_hlt(X86CPU *cpu)
CPUX86State *env = &cpu->env;
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
- cs->halted = 1;
+ cpu_halted_set(cs, 1);
cs->exception_index = EXCP_HLT;
cpu_loop_exit(cs);
}
diff --git a/target/i386/whpx-all.c b/target/i386/whpx-all.c
index c78baac6df..efc2d88810 100644
--- a/target/i386/whpx-all.c
+++ b/target/i386/whpx-all.c
@@ -759,7 +759,7 @@ static int whpx_handle_halt(CPUState *cpu)
(env->eflags & IF_MASK)) &&
!(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
cpu->exception_index = EXCP_HLT;
- cpu->halted = true;
+ cpu_halted_set(cpu, true);
ret = 1;
}
qemu_mutex_unlock_iothread();
@@ -918,7 +918,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
- cpu->halted = false;
+ cpu_halted_set(cpu, false);
}
if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
@@ -944,7 +944,7 @@ static int whpx_vcpu_run(CPUState *cpu)
int ret;
whpx_vcpu_process_async_events(cpu);
- if (cpu->halted) {
+ if (cpu_halted(cpu)) {
cpu->exception_index = EXCP_HLT;
atomic_set(&cpu->exit_request, false);
return 0;
--
2.17.1
- [PATCH v10 08/73] ppc: convert to helper_cpu_halted_set, (continued)
- [PATCH v10 08/73] ppc: convert to helper_cpu_halted_set, Robert Foley, 2020/06/17
- [PATCH v10 14/73] cpu: define cpu_halted helpers, Robert Foley, 2020/06/17
- [PATCH v10 12/73] alpha: convert to helper_cpu_halted_set, Robert Foley, 2020/06/17
- [PATCH v10 15/73] tcg-runtime: convert to cpu_halted_set, Robert Foley, 2020/06/17
- [PATCH v10 13/73] microblaze: convert to helper_cpu_halted_set, Robert Foley, 2020/06/17
- [PATCH v10 17/73] arm: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 16/73] hw/semihosting: convert to cpu_halted_set, Robert Foley, 2020/06/17
- [PATCH v10 18/73] ppc: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 19/73] sh4: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 21/73] lm32: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 20/73] i386: convert to cpu_halted,
Robert Foley <=
- [PATCH v10 26/73] sparc: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 27/73] xtensa: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 25/73] s390x: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 29/73] openrisc: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 32/73] cpu: define cpu_interrupt_request helpers, Robert Foley, 2020/06/17
- [PATCH v10 22/73] m68k: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 23/73] mips: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 28/73] gdbstub: convert to cpu_halted, Robert Foley, 2020/06/17
- [PATCH v10 34/73] exec: use cpu_reset_interrupt, Robert Foley, 2020/06/17
- [PATCH v10 30/73] cpu-exec: convert to cpu_halted, Robert Foley, 2020/06/17