From: Paolo Bonzini
Subject: [Qemu-devel] [CFT PATCH 05/12] always qemu_cpu_kick after unhalting a cpu
Date: Tue, 8 Feb 2011 18:18:22 +0100

This ensures env->halt_cond is broadcast, so that the wait loops in
qemu_tcg_wait_io_event and qemu_kvm_wait_io_event are exited naturally
rather than through a timeout.
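
For context, here is a minimal stand-alone sketch of the pattern involved
(plain pthreads, not the actual cpus.c code; every name in it is made up
for illustration): the vcpu side sleeps on a condition variable with a
timeout while halted, so whoever clears the halted flag must also
broadcast the condition, otherwise the waiter only notices the change
when the timeout expires.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t halt_cond = PTHREAD_COND_INITIALIZER;
    static bool halted = true;

    static void *vcpu_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (halted) {
            /* like the qemu_*_wait_io_event() loops: wakes promptly only
               if someone broadcasts halt_cond, otherwise after a second */
            struct timespec ts;
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += 1;
            pthread_cond_timedwait(&halt_cond, &lock, &ts);
        }
        pthread_mutex_unlock(&lock);
        printf("vcpu resumed\n");
        return NULL;
    }

    static void unhalt_and_kick(void)
    {
        pthread_mutex_lock(&lock);
        halted = false;                     /* like env->halted = 0 */
        pthread_cond_broadcast(&halt_cond); /* the qemu_cpu_kick() step */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, vcpu_thread, NULL);
        usleep(100 * 1000);   /* give the vcpu thread time to block */
        unhalt_and_kick();
        pthread_join(t, NULL);
        return 0;
    }

With the broadcast in place the waiter returns immediately; drop it and
the example only resumes when the one-second timeout fires, which is the
situation the qemu_cpu_kick() calls below avoid.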

Signed-off-by: Paolo Bonzini <address@hidden>
---
 hw/apic.c          |    1 +
 hw/pc.c            |    1 +
 hw/ppc.c           |    2 ++
 hw/sun4m.c         |   11 +++++++++--
 hw/sun4u.c         |    4 ++--
 target-s390x/kvm.c |    1 +
 6 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/hw/apic.c b/hw/apic.c
index 2f8376a..97e3b2b 100644
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -527,6 +527,7 @@ void apic_sipi(DeviceState *d)
         return;
     cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
     s->wait_for_sipi = 0;
+    qemu_cpu_kick(s->cpu_env);
 }
 
 static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
diff --git a/hw/pc.c b/hw/pc.c
index 4dfdc0b..fc1aa27 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -907,6 +907,7 @@ static void pc_cpu_reset(void *opaque)
 
     cpu_reset(env);
     env->halted = !cpu_is_bsp(env);
+    qemu_cpu_kick(env);
 }
 
 static CPUState *pc_new_cpu(const char *cpu_model)
diff --git a/hw/ppc.c b/hw/ppc.c
index 968aec1..de02d33 100644
--- a/hw/ppc.c
+++ b/hw/ppc.c
@@ -208,6 +208,7 @@ static void ppc970_set_irq (void *opaque, int pin, int level)
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
                 env->halted = 0;
+                qemu_cpu_kick(env);
             }
             break;
         case PPC970_INPUT_HRESET:
@@ -300,6 +301,7 @@ static void ppc40x_set_irq (void *opaque, int pin, int level)
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
                 env->halted = 0;
+                qemu_cpu_kick(env);
             }
             break;
         case PPC40x_INPUT_DEBUG:
diff --git a/hw/sun4m.c b/hw/sun4m.c
index 30e8a21..47d88be 100644
--- a/hw/sun4m.c
+++ b/hw/sun4m.c
@@ -253,15 +253,21 @@ void cpu_check_irqs(CPUState *env)
     }
 }
 
+static void cpu_kick_irq(CPUState *env)
+{
+    env->halted = 0;
+    cpu_check_irqs(env);
+    qemu_cpu_kick(env);
+}
+
 static void cpu_set_irq(void *opaque, int irq, int level)
 {
     CPUState *env = opaque;
 
     if (level) {
         trace_sun4m_cpu_set_irq_raise(irq);
-        env->halted = 0;
         env->pil_in |= 1 << irq;
-        cpu_check_irqs(env);
+        cpu_kick_irq(env);
     } else {
         trace_sun4m_cpu_set_irq_lower(irq);
         env->pil_in &= ~(1 << irq);
@@ -279,6 +285,7 @@ static void main_cpu_reset(void *opaque)
 
     cpu_reset(env);
     env->halted = 0;
+    qemu_cpu_kick(env);
 }
 
 static void secondary_cpu_reset(void *opaque)
diff --git a/hw/sun4u.c b/hw/sun4u.c
index 90b1ce2..d282324 100644
--- a/hw/sun4u.c
+++ b/hw/sun4u.c
@@ -298,6 +298,7 @@ static void cpu_kick_irq(CPUState *env)
 {
     env->halted = 0;
     cpu_check_irqs(env);
+    qemu_cpu_kick(env);
 }
 
 static void cpu_set_irq(void *opaque, int irq, int level)
@@ -306,9 +307,8 @@ static void cpu_set_irq(void *opaque, int irq, int level)
 
     if (level) {
         CPUIRQ_DPRINTF("Raise CPU IRQ %d\n", irq);
-        env->halted = 0;
         env->pil_in |= 1 << irq;
-        cpu_check_irqs(env);
+        cpu_kick_irq(env);
     } else {
         CPUIRQ_DPRINTF("Lower CPU IRQ %d\n", irq);
         env->pil_in &= ~(1 << irq);
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 1702c46..fc90b8d 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -196,6 +196,7 @@ static void kvm_s390_interrupt_internal(CPUState *env, int type, uint32_t parm,
 
     env->halted = 0;
     env->exception_index = -1;
+    qemu_cpu_kick(env);
 
     kvmint.type = type;
     kvmint.parm = parm;
-- 
1.7.3.5