qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event


From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH] cpus: unify qemu_*_wait_io_event
Date: Wed, 4 Apr 2018 12:51:04 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.6.0

On 04/04/2018 12:34, Alex Bennée wrote:
> 
> Paolo Bonzini <address@hidden> writes:
> 
>> Except for round-robin TCG, every other accelerator is using more or
>> less the same code around qemu_wait_io_event_common.  The exception
>> is HAX, which also has to eat the dummy APC that is queued by
>> qemu_cpu_kick_thread.
>>
>> We can add the SleepEx call to qemu_wait_io_event under "if
>> (!tcg_enabled())", since that is the condition that is used in
>> qemu_cpu_kick_thread, and unify the function for KVM, HAX, HVF and
>> multi-threaded TCG.  Single-threaded TCG code can also be simplified
>> since it is only used in the round-robin, sleep-if-all-CPUs-idle case.
>>
>> Signed-off-by: Paolo Bonzini <address@hidden>
> 
> I had trouble applying this patch, but the change seems sane to me.
> 
> Acked-by: Alex Bennée <address@hidden>

Isn't this commit db08b687cdd5319286665aabd34f82665630416f?

Paolo

>> ---
>>  cpus.c | 49 +++++++++++++++++--------------------------------
>>  1 file changed, 17 insertions(+), 32 deletions(-)
>>
>> diff --git a/cpus.c b/cpus.c
>> index 440b9291f5..3b6c9879ec 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -911,7 +911,8 @@ static void kick_tcg_thread(void *opaque)
>>
>>  static void start_tcg_kick_timer(void)
>>  {
>> -    if (!mttcg_enabled && !tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>> +    assert(!mttcg_enabled);
>> +    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
>>          tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>>                                             kick_tcg_thread, NULL);
>>          timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
>> @@ -920,6 +921,7 @@ static void start_tcg_kick_timer(void)
>>
>>  static void stop_tcg_kick_timer(void)
>>  {
>> +    assert(!mttcg_enabled);
>>      if (tcg_kick_vcpu_timer) {
>>          timer_del(tcg_kick_vcpu_timer);
>>          tcg_kick_vcpu_timer = NULL;
>> @@ -1154,18 +1156,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
>>      process_queued_cpu_work(cpu);
>>  }
>>
>> -static bool qemu_tcg_should_sleep(CPUState *cpu)
>> +static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
>>  {
>> -    if (mttcg_enabled) {
>> -        return cpu_thread_is_idle(cpu);
>> -    } else {
>> -        return all_cpu_threads_idle();
>> -    }
>> -}
>> -
>> -static void qemu_tcg_wait_io_event(CPUState *cpu)
>> -{
>> -    while (qemu_tcg_should_sleep(cpu)) {
>> +    while (all_cpu_threads_idle()) {
>>          stop_tcg_kick_timer();
>>          qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>      }
>> @@ -1175,20 +1168,18 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
>>      qemu_wait_io_event_common(cpu);
>>  }
>>
>> -static void qemu_kvm_wait_io_event(CPUState *cpu)
>> +static void qemu_wait_io_event(CPUState *cpu)
>>  {
>>      while (cpu_thread_is_idle(cpu)) {
>>          qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>>      }
>>
>> -    qemu_wait_io_event_common(cpu);
>> -}
>> -
>> -static void qemu_hvf_wait_io_event(CPUState *cpu)
>> -{
>> -    while (cpu_thread_is_idle(cpu)) {
>> -        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> +#ifdef _WIN32
>> +    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
>> +    if (!tcg_enabled()) {
>> +        SleepEx(0, TRUE);
>>      }
>> +#endif
>>      qemu_wait_io_event_common(cpu);
>>  }
>>
>> @@ -1224,7 +1215,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>                  cpu_handle_guest_debug(cpu);
>>              }
>>          }
>> -        qemu_kvm_wait_io_event(cpu);
>> +        qemu_wait_io_event(cpu);
>>      } while (!cpu->unplug || cpu_can_run(cpu));
>>
>>      qemu_kvm_destroy_vcpu(cpu);
>> @@ -1270,7 +1261,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>>              exit(1);
>>          }
>>          qemu_mutex_lock_iothread();
>> -        qemu_wait_io_event_common(cpu);
>> +        qemu_wait_io_event(cpu);
>>      }
>>
>>      return NULL;
>> @@ -1487,7 +1478,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
>>              atomic_mb_set(&cpu->exit_request, 0);
>>          }
>>
>> -        qemu_tcg_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>> +        qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
>>          deal_with_unplugged_cpus();
>>      }
>>
>> @@ -1518,13 +1509,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
>>              }
>>          }
>>
>> -        while (cpu_thread_is_idle(cpu)) {
>> -            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>> -        }
>> -#ifdef _WIN32
>> -        SleepEx(0, TRUE);
>> -#endif
>> -        qemu_wait_io_event_common(cpu);
>> +        qemu_wait_io_event(cpu);
>>      }
>>      return NULL;
>>  }
>> @@ -1561,7 +1546,7 @@ static void *qemu_hvf_cpu_thread_fn(void *arg)
>>                  cpu_handle_guest_debug(cpu);
>>              }
>>          }
>> -        qemu_hvf_wait_io_event(cpu);
>> +        qemu_wait_io_event(cpu);
>>      } while (!cpu->unplug || cpu_can_run(cpu));
>>
>>      hvf_vcpu_destroy(cpu);
>> @@ -1640,7 +1625,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>          }
>>
>>          atomic_mb_set(&cpu->exit_request, 0);
>> -        qemu_tcg_wait_io_event(cpu);
>> +        qemu_wait_io_event(cpu);
>>      }
>>
>>      return NULL;
> 
> 
> --
> Alex Bennée
> 




reply via email to

[Prev in Thread] Current Thread [Next in Thread]