[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v8 44/74] i386/hvf: convert to cpu_interrupt_request
From: Robert Foley
Subject: [PATCH v8 44/74] i386/hvf: convert to cpu_interrupt_request
Date: Thu, 26 Mar 2020 15:31:26 -0400
From: "Emilio G. Cota" <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
Signed-off-by: Emilio G. Cota <address@hidden>
Signed-off-by: Robert Foley <address@hidden>
---
target/i386/hvf/hvf.c | 8 +++++---
target/i386/hvf/x86hvf.c | 26 +++++++++++++++-----------
2 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index bf60ce9d66..52ccdf85e4 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -262,7 +262,7 @@ void update_apic_tpr(CPUState *cpu)
static void hvf_handle_interrupt(CPUState * cpu, int mask)
{
- cpu->interrupt_request |= mask;
+ cpu_interrupt_request_or(cpu, mask);
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
}
@@ -733,10 +733,12 @@ int hvf_vcpu_exec(CPUState *cpu)
ret = 0;
switch (exit_reason) {
case EXIT_REASON_HLT: {
+ uint32_t interrupt_request = cpu_interrupt_request(cpu);
+
macvm_set_rip(cpu, rip + ins_len);
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK))
- && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ && !(interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu_halted_set(cpu, 1);
ret = EXCP_HLT;
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 90f1662d0c..892ae0e99a 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -352,6 +352,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
uint8_t vector;
uint64_t intr_type;
+ uint32_t interrupt_request;
bool have_event = true;
if (env->interrupt_injected != -1) {
vector = env->interrupt_injected;
@@ -400,7 +401,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
};
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_NMI) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_NMI);
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -411,7 +412,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
}
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_HARD);
@@ -420,39 +421,42 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) {
vmx_set_int_window_exiting(cpu_state);
}
- return (cpu_state->interrupt_request
- & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+ return cpu_interrupt_request(cpu_state) & (CPU_INTERRUPT_INIT |
+ CPU_INTERRUPT_TPR);
}
int hvf_process_events(CPUState *cpu_state)
{
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request;
EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
- if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_INIT) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_init(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_POLL) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
- if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+
+ interrupt_request = cpu_interrupt_request(cpu_state);
+ if (((interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK)) ||
- (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
+ (interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_halted_set(cpu_state, 0);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_sipi(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_TPR) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_TPR);
hvf_cpu_synchronize_state(cpu_state);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
--
2.17.1
- [PATCH v8 51/74] mips: convert to cpu_interrupt_request, (continued)
- [PATCH v8 51/74] mips: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 52/74] nios: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 53/74] s390x: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 54/74] alpha: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 55/74] moxie: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 57/74] openrisc: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 58/74] unicore32: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 56/74] sparc: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 59/74] microblaze: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 60/74] accel/tcg: convert to cpu_interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 44/74] i386/hvf: convert to cpu_interrupt_request, Robert Foley <=
- [PATCH v8 61/74] cpu: convert to interrupt_request, Robert Foley, 2020/03/26
- [PATCH v8 62/74] cpu: call .cpu_has_work with the CPU lock held, Robert Foley, 2020/03/26
- [PATCH v8 63/74] cpu: introduce cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 64/74] ppc: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 65/74] mips: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 66/74] s390x: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 67/74] riscv: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 68/74] sparc: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 69/74] xtensa: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/03/26
- [PATCH v8 70/74] cpu: rename all_cpu_threads_idle to qemu_tcg_rr_all_cpu_threads_idle, Robert Foley, 2020/03/26