
Re: [Qemu-devel] [RFC PATCH 10/10] cpus: reclaim allocated vCPU objects


From: Anshul Makkar
Subject: Re: [Qemu-devel] [RFC PATCH 10/10] cpus: reclaim allocated vCPU objects
Date: Mon, 11 Aug 2014 16:35:26 +0200

Hi Gu,

These are APIC IDs.
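(To be explicit about the two ids in these commands: apic-id=N sets the
CPU's APIC ID property, while id=cpuN is just the device id that
device_del refers to later, e.g.

(qemu) device_add qemu64-x86_64-cpu,apic-id=3,id=cpu3
(qemu) device_del cpu3

device_del acts on the device id "cpu3", not on the APIC ID.)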

Taking the example from the previous mail.
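(For reference, I am assuming a guest started with 2 initial vCPUs and
room for 6, i.e. something roughly like

qemu-system-x86_64 -enable-kvm -cpu qemu64 -smp 2,maxcpus=6 ...

the exact command line is my assumption, not taken from the previous
mail.)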

Original cpus:0,1 maxcpus:6
(qemu) device_add qemu64-x86_64-cpu,apic-id=3,id=cpu3
(qemu) device_add qemu64-x86_64-cpu,apic-id=5,id=cpu5

cat /proc/cpuinfo shows
processor 0
processor 1
processor 2
processor 3

Instead of 3 and 5, cpus 2 and 3 have been added.

Now if I do again

(qemu) device_add qemu64-x86_64-cpu,apic-id=5,id=cpu5
it says the cpu already exists, but cat /proc/cpuinfo doesn't show me a
cpu with apicid 5.

Scenario 2:

Original cpus:0,1 maxcpus:6
(qemu) device_add qemu64-x86_64-cpu,apic-id=2,id=cpu2
(qemu) device_add qemu64-x86_64-cpu,apic-id=3,id=cpu3
(qemu) device_add qemu64-x86_64-cpu,apic-id=4,id=cpu4
cat /proc/cpuinfo
processor 0
processor 1
processor 2
processor 3
processor 4


(qemu) device_del cpu2
(qemu) device_del cpu4
cat /proc/cpuinfo
processor 0
processor 1
processor 3

(qemu) device_add qemu64-x86_64-cpu,apic-id=4,id=cpu4

cpu 2 gets added instead of 4, and cat /proc/cpuinfo shows
processor 0
processor 1
processor 2
processor 3

From what I can see, random deletion and addition is not possible.

I have put traces in the code to verify the APIC IDs, as I couldn't see
the APIC IDs in the output of "cat /proc/cpuinfo".
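(In case it helps: on x86 guests whose kernel exposes the apicid field
in /proc/cpuinfo, something like

grep -E '^(processor|apicid)' /proc/cpuinfo

pairs each processor with its APIC ID. That field does not show up for
me here, hence the traces in the code.)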

Please let me know if I am missing something.

Thanks
Anshul Makkar


On Fri, Aug 8, 2014 at 7:48 AM, Gu Zheng <address@hidden> wrote:
> Hi Anshul,
> On 08/07/2014 09:31 PM, Anshul Makkar wrote:
>
>> Thanks Gu. cpu hot-unplug is working fine in my tests.
>
> Thanks for your quick test.
>
>>
>> For cpu-hotplug, I get inconsistent results if I delete an arbitrary
>> cpu and not just the last one.
>>
>> e.g.
>> list of cpus: 1, 2, 3
>> device_add cpu 4
>> device_add cpu 5
>> device_add cpu 6
>
> What type of id do you use here? apic-id or device id?
>
>>
>> device_del cpu 4
>> device_del cpu 6
>
> Could you please offer the detailed reproduction info? The more, the better.
>
>>
>> Now if I do device_add cpu6, then cpu 4 gets added, and if I then try
>> to add cpu 4 or 6, it says the cpu already exists. It's a kind of vague
>> behaviour. Do we follow any protocol here while adding and deleting
>> cpus?
>
> There is no strict restriction here. Does the following routine match
> the condition you mentioned? It works fine on my box.
>
> Original cpus:0,1 maxcpus:6
> (qemu) device_add qemu64-x86_64-cpu,apic-id=2,id=cpu2
> (qemu) device_add qemu64-x86_64-cpu,apic-id=3,id=cpu3
> (qemu) device_add qemu64-x86_64-cpu,apic-id=4,id=cpu4
>
> (qemu) device_del cpu2
> (qemu) device_del cpu4
>
> (qemu) device_add qemu64-x86_64-cpu,apic-id=4,id=cpu4
> (qemu) device_add qemu64-x86_64-cpu,apic-id=2,id=cpu2
>
> Thanks,
> Gu
>
>>
>> Thanks
>> Anshul Makkar
>> www.justkernel.com
>>
>> On Thu, Aug 7, 2014 at 6:54 AM, Gu Zheng <address@hidden> wrote:
>>> After ACPI gets a signal to eject a vCPU, the vCPU must be
>>> removed from the CPU list before it is really removed, and then
>>> all the related vCPU objects are released.
>>> But we do not close the KVM vcpu fd; we just record it in a list
>>> so that it can be reused.
>>>
>>> Signed-off-by: Chen Fan <address@hidden>
>>> Signed-off-by: Gu Zheng <address@hidden>
>>> ---
>>>  cpus.c               |   37 ++++++++++++++++++++++++++++++++
>>>  include/sysemu/kvm.h |    1 +
>>>  kvm-all.c            |   57 +++++++++++++++++++++++++++++++++++++++++++++++++-
>>>  3 files changed, 94 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/cpus.c b/cpus.c
>>> index 4dfb889..9a73407 100644
>>> --- a/cpus.c
>>> +++ b/cpus.c
>>> @@ -786,6 +786,24 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
>>>      qemu_cpu_kick(cpu);
>>>  }
>>>
>>> +static void qemu_kvm_destroy_vcpu(CPUState *cpu)
>>> +{
>>> +    CPU_REMOVE(cpu);
>>> +
>>> +    if (kvm_destroy_vcpu(cpu) < 0) {
>>> +        fprintf(stderr, "kvm_destroy_vcpu failed.\n");
>>> +        exit(1);
>>> +    }
>>> +
>>> +    object_unparent(OBJECT(cpu));
>>> +}
>>> +
>>> +static void qemu_tcg_destroy_vcpu(CPUState *cpu)
>>> +{
>>> +    CPU_REMOVE(cpu);
>>> +    object_unparent(OBJECT(cpu));
>>> +}
>>> +
>>>  static void flush_queued_work(CPUState *cpu)
>>>  {
>>>      struct qemu_work_item *wi;
>>> @@ -877,6 +895,11 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>>              }
>>>          }
>>>          qemu_kvm_wait_io_event(cpu);
>>> +        if (cpu->exit && !cpu_can_run(cpu)) {
>>> +            qemu_kvm_destroy_vcpu(cpu);
>>> +            qemu_mutex_unlock(&qemu_global_mutex);
>>> +            return NULL;
>>> +        }
>>>      }
>>>
>>>      return NULL;
>>> @@ -929,6 +952,7 @@ static void tcg_exec_all(void);
>>>  static void *qemu_tcg_cpu_thread_fn(void *arg)
>>>  {
>>>      CPUState *cpu = arg;
>>> +    CPUState *remove_cpu = NULL;
>>>
>>>      qemu_tcg_init_cpu_signals();
>>>      qemu_thread_get_self(cpu->thread);
>>> @@ -961,6 +985,16 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>>              }
>>>          }
>>>          qemu_tcg_wait_io_event();
>>> +        CPU_FOREACH(cpu) {
>>> +            if (cpu->exit && !cpu_can_run(cpu)) {
>>> +                remove_cpu = cpu;
>>> +                break;
>>> +            }
>>> +        }
>>> +        if (remove_cpu) {
>>> +            qemu_tcg_destroy_vcpu(remove_cpu);
>>> +            remove_cpu = NULL;
>>> +        }
>>>      }
>>>
>>>      return NULL;
>>> @@ -1316,6 +1350,9 @@ static void tcg_exec_all(void)
>>>                  break;
>>>              }
>>>          } else if (cpu->stop || cpu->stopped) {
>>> +            if (cpu->exit) {
>>> +                next_cpu = CPU_NEXT(cpu);
>>> +            }
>>>              break;
>>>          }
>>>      }
>>> diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
>>> index 174ea36..88e2403 100644
>>> --- a/include/sysemu/kvm.h
>>> +++ b/include/sysemu/kvm.h
>>> @@ -178,6 +178,7 @@ int kvm_has_intx_set_mask(void);
>>>
>>>  int kvm_init_vcpu(CPUState *cpu);
>>>  int kvm_cpu_exec(CPUState *cpu);
>>> +int kvm_destroy_vcpu(CPUState *cpu);
>>>
>>>  #ifdef NEED_CPU_H
>>>
>>> diff --git a/kvm-all.c b/kvm-all.c
>>> index 1402f4f..d0caeff 100644
>>> --- a/kvm-all.c
>>> +++ b/kvm-all.c
>>> @@ -74,6 +74,12 @@ typedef struct KVMSlot
>>>
>>>  typedef struct kvm_dirty_log KVMDirtyLog;
>>>
>>> +struct KVMParkedVcpu {
>>> +    unsigned long vcpu_id;
>>> +    int kvm_fd;
>>> +    QLIST_ENTRY(KVMParkedVcpu) node;
>>> +};
>>> +
>>>  struct KVMState
>>>  {
>>>      KVMSlot *slots;
>>> @@ -108,6 +114,7 @@ struct KVMState
>>>      QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
>>>      bool direct_msi;
>>>  #endif
>>> +    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
>>>  };
>>>
>>>  KVMState *kvm_state;
>>> @@ -226,6 +233,53 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
>>>      return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
>>>  }
>>>
>>> +int kvm_destroy_vcpu(CPUState *cpu)
>>> +{
>>> +    KVMState *s = kvm_state;
>>> +    long mmap_size;
>>> +    struct KVMParkedVcpu *vcpu = NULL;
>>> +    int ret = 0;
>>> +
>>> +    DPRINTF("kvm_destroy_vcpu\n");
>>> +
>>> +    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
>>> +    if (mmap_size < 0) {
>>> +        ret = mmap_size;
>>> +        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
>>> +        goto err;
>>> +    }
>>> +
>>> +    ret = munmap(cpu->kvm_run, mmap_size);
>>> +    if (ret < 0) {
>>> +        goto err;
>>> +    }
>>> +
>>> +    vcpu = g_malloc0(sizeof(*vcpu));
>>> +    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
>>> +    vcpu->kvm_fd = cpu->kvm_fd;
>>> +    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
>>> +err:
>>> +    return ret;
>>> +}
>>> +
>>> +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
>>> +{
>>> +    struct KVMParkedVcpu *cpu;
>>> +
>>> +    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
>>> +        if (cpu->vcpu_id == vcpu_id) {
>>> +            int kvm_fd;
>>> +
>>> +            QLIST_REMOVE(cpu, node);
>>> +            kvm_fd = cpu->kvm_fd;
>>> +            g_free(cpu);
>>> +            return kvm_fd;
>>> +        }
>>> +    }
>>> +
>>> +    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
>>> +}
>>> +
>>>  int kvm_init_vcpu(CPUState *cpu)
>>>  {
>>>      KVMState *s = kvm_state;
>>> @@ -234,7 +288,7 @@ int kvm_init_vcpu(CPUState *cpu)
>>>
>>>      DPRINTF("kvm_init_vcpu\n");
>>>
>>> -    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
>>> +    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
>>>      if (ret < 0) {
>>>          DPRINTF("kvm_create_vcpu failed\n");
>>>          goto err;
>>> @@ -1404,6 +1458,7 @@ int kvm_init(MachineClass *mc)
>>>  #ifdef KVM_CAP_SET_GUEST_DEBUG
>>>      QTAILQ_INIT(&s->kvm_sw_breakpoints);
>>>  #endif
>>> +    QLIST_INIT(&s->kvm_parked_vcpus);
>>>      s->vmfd = -1;
>>>      s->fd = qemu_open("/dev/kvm", O_RDWR);
>>>      if (s->fd == -1) {
>>> --
>>> 1.7.7
>>>
>>
>
>


