Re: [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support


From: Igor Mammedov
Subject: Re: [Qemu-devel] [RFC PATCH v2.1 10/12] spapr: CPU hotplug support
Date: Fri, 6 May 2016 10:57:21 +0200

On Thu, 31 Mar 2016 14:09:19 +0530
Bharata B Rao <address@hidden> wrote:

> Set up device tree entries for the hotplugged CPU core and use the
> existing RTAS event logging infrastructure to send CPU hotplug notification
> to the guest.

perhaps you could reuse:

[RFC,20/42] machine: add cpu-hotplug machine option
http://patchwork.ozlabs.org/patch/617516/

to enable CPU hotplug explicitly; by default the machine would then be
hotplug-less, saving resources.
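
For example, spapr_core_pre_plug() could then key off that option rather
than a machine-class flag alone; a rough, untested sketch (the
cpu_hotplug_enabled field is a made-up name here, standing in for whatever
that RFC actually adds to MachineState):

    void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                             Error **errp)
    {
        MachineState *machine = MACHINE(qdev_get_machine());

        /* hypothetical flag added by the proposed cpu-hotplug machine option */
        if (!machine->cpu_hotplug_enabled && dev->hotplugged) {
            error_setg(errp, "CPU hotplug is not enabled for this machine");
            return;
        }

        /* ... the existing pre-plug checks would follow here ... */
    }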

> 
> Signed-off-by: Bharata B Rao <address@hidden>
> ---
>  hw/ppc/spapr.c                  | 58 ++++++++++++++++++++++++++++++++++
>  hw/ppc/spapr_cpu_core.c         | 70 +++++++++++++++++++++++++++++++++++++++++
>  hw/ppc/spapr_events.c           |  3 ++
>  hw/ppc/spapr_rtas.c             | 24 ++++++++++++++
>  include/hw/ppc/spapr.h          |  2 ++
>  include/hw/ppc/spapr_cpu_core.h |  2 ++
>  6 files changed, 159 insertions(+)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 1ead043..1a5dbd9 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -603,6 +603,16 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
>      size_t page_sizes_prop_size;
>      uint32_t vcpus_per_socket = smp_threads * smp_cores;
>      uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
> +    sPAPRDRConnector *drc;
> +    sPAPRDRConnectorClass *drck;
> +    int drc_index;
> +
> +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index);
> +    if (drc) {
> +        drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
> +        drc_index = drck->get_index(drc);
> +        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
> +    }
>  
>      /* Note: we keep CI large pages off for now because a 64K capable guest
>       * provisioned with large pages might otherwise try to map a qemu
> @@ -987,6 +997,16 @@ static void spapr_finalize_fdt(sPAPRMachineState *spapr,
>          _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
>      }
>  
> +    if (smc->dr_cpu_enabled) {
> +        int offset = fdt_path_offset(fdt, "/cpus");
> +        ret = spapr_drc_populate_dt(fdt, offset, NULL,
> +                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
> +        if (ret < 0) {
> +            error_report("Couldn't set up CPU DR device tree properties");
> +            exit(1);
> +        }
> +    }
> +
>      _FDT((fdt_pack(fdt)));
>  
>      if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
> @@ -1622,6 +1642,8 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
>  void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
>  {
>      CPUPPCState *env = &cpu->env;
> +    CPUState *cs = CPU(cpu);
> +    int i;
>  
>      /* Set time-base frequency to 512 MHz */
>      cpu_ppc_tb_init(env, TIMEBASE_FREQ);
> @@ -1646,6 +1668,14 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
>          }
>      }
>  
> +    /* Set NUMA node for the added CPUs  */
> +    for (i = 0; i < nb_numa_nodes; i++) {
> +        if (test_bit(cs->cpu_index, numa_info[i].node_cpu)) {
> +            cs->numa_node = i;
> +            break;
> +        }
> +    }
> +
>      xics_cpu_setup(spapr->icp, cpu);
>  
>      qemu_register_reset(spapr_cpu_reset, cpu);
> @@ -1825,6 +1855,11 @@ static void ppc_spapr_init(MachineState *machine)
>  
>          for (i = 0; i < spapr_max_cores; i++) {
>              int core_dt_id = i * smt;
> +            sPAPRDRConnector *drc =
> +                spapr_dr_connector_new(OBJECT(spapr),
> +                                       SPAPR_DR_CONNECTOR_TYPE_CPU, core_dt_id);
> +
> +            qemu_register_reset(spapr_drc_reset, drc);
>  
>              if (i < spapr_cores) {
>                  char *type = spapr_get_cpu_core_type(machine->cpu_model);
> @@ -2247,6 +2282,27 @@ out:
>      error_propagate(errp, local_err);
>  }
>  
> +void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
> +                                    sPAPRMachineState *spapr)
> +{
> +    PowerPCCPU *cpu = POWERPC_CPU(cs);
> +    DeviceClass *dc = DEVICE_GET_CLASS(cs);
> +    int id = ppc_get_vcpu_dt_id(cpu);
> +    void *fdt;
> +    int offset, fdt_size;
> +    char *nodename;
> +
> +    fdt = create_device_tree(&fdt_size);
> +    nodename = g_strdup_printf("address@hidden", dc->fw_name, id);
> +    offset = fdt_add_subnode(fdt, 0, nodename);
> +
> +    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
> +    g_free(nodename);
> +
> +    *fdt_offset = offset;
> +    return fdt;
> +}
> +
>  static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
>                                        DeviceState *dev, Error **errp)
>  {
> @@ -2287,6 +2343,8 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
>          }
>  
>          spapr_memory_plug(hotplug_dev, dev, node, errp);
> +    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
> +        spapr_core_plug(hotplug_dev, dev, errp);
>      }
>  }
>  
> diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
> index 640d143..a9ba843 100644
> --- a/hw/ppc/spapr_cpu_core.c
> +++ b/hw/ppc/spapr_cpu_core.c
> @@ -18,6 +18,7 @@
>  void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
>                           Error **errp)
>  {
> +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
>      sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
looks like hotplug_dev is the machine,
so why not just cast it instead of calling qdev_get_machine()?

the same applies to the other plug handlers.
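
i.e. something like this (untested sketch):

    void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                             Error **errp)
    {
        /* the machine itself is the hotplug handler, so cast it directly */
        sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
        sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);

        /* ... rest unchanged ... */
    }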

>      int spapr_max_cores = max_cpus / smp_threads;
>      int index;
> @@ -25,6 +26,11 @@ void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
>      Error *local_err = NULL;
>      CPUCore *cc = CPU_CORE(dev);
>  
> +    if (!smc->dr_cpu_enabled && dev->hotplugged) {
> +        error_setg(&local_err, "CPU hotplug not supported for this machine");
> +        goto out;
> +    }
> +
>      if (cc->threads != smp_threads) {
>          error_setg(&local_err, "threads must be %d", smp_threads);
>          goto out;
> @@ -49,6 +55,70 @@ out:
>      error_propagate(errp, local_err);
>  }
>  
> +void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> +                     Error **errp)
> +{
> +    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
> +    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
> +    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
> +    CPUCore *cc = CPU_CORE(dev);
> +    CPUState *cs = CPU(&core->threads[0]);
> +    sPAPRDRConnector *drc;
> +    sPAPRDRConnectorClass *drck;
> +    Error *local_err = NULL;
> +    void *fdt = NULL;
> +    int fdt_offset = 0;
> +    int index;
> +    int smt = kvmppc_smt_threads();
> +
> +    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, cc->core);
> +    index = cc->core / smt;
> +    spapr->cores[index] = OBJECT(dev);
> +
> +    if (!smc->dr_cpu_enabled) {
> +        /*
> +         * This is a cold plugged CPU core but the machine doesn't support
> +         * DR. So skip the hotplug path, ensuring that the core is brought
> +         * up online without an associated DR connector.
> +         */
> +        return;
> +    }
> +
> +    g_assert(drc);
> +
> +    /*
> +     * Setup CPU DT entries only for hotplugged CPUs. For boot time or
> +     * coldplugged CPUs DT entries are setup in spapr_finalize_fdt().
> +     */
> +    if (dev->hotplugged) {
> +        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
> +        dev->hotplugged = true;
> +    }
> +
> +    drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
> +    drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
> +    if (local_err) {
> +        g_free(fdt);
> +        spapr->cores[index] = NULL;
> +        error_propagate(errp, local_err);
> +        return;
> +    }
> +
> +    if (dev->hotplugged) {
> +        /*
> +         * Send hotplug notification interrupt to the guest only in case
> +         * of hotplugged CPUs.
> +         */
> +        spapr_hotplug_req_add_by_index(drc);
> +    } else {
> +        /*
> +         * Set the right DRC states for cold plugged CPU.
> +         */
> +        drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
> +        drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
> +    }
> +}
> +
>  static const TypeInfo spapr_cpu_core_type_info = {
>      .name = TYPE_SPAPR_CPU_CORE,
>      .parent = TYPE_CPU_CORE,
> diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
> index 39f4682..10340e1 100644
> --- a/hw/ppc/spapr_events.c
> +++ b/hw/ppc/spapr_events.c
> @@ -437,6 +437,9 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
>      case SPAPR_DR_CONNECTOR_TYPE_LMB:
>          hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
>          break;
> +    case SPAPR_DR_CONNECTOR_TYPE_CPU:
> +        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
> +        break;
>      default:
>          /* we shouldn't be signaling hotplug events for resources
>           * that don't support them
> diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
> index b7c5ebd..cc0369e 100644
> --- a/hw/ppc/spapr_rtas.c
> +++ b/hw/ppc/spapr_rtas.c
> @@ -34,6 +34,7 @@
>  
>  #include "hw/ppc/spapr.h"
>  #include "hw/ppc/spapr_vio.h"
> +#include "hw/ppc/ppc.h"
>  #include "qapi-event.h"
>  #include "hw/boards.h"
>  
> @@ -161,6 +162,27 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
>      rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
>  }
>  
> +/*
> + * Set the timebase offset of the CPU to that of the first CPU.
> + * This helps a hotplugged CPU to have the correct timebase offset.
> + */
> +static void spapr_cpu_update_tb_offset(PowerPCCPU *cpu)
> +{
> +    PowerPCCPU *fcpu = POWERPC_CPU(first_cpu);
> +
> +    cpu->env.tb_env->tb_offset = fcpu->env.tb_env->tb_offset;
> +}
> +
> +static void spapr_cpu_set_endianness(PowerPCCPU *cpu)
> +{
> +    PowerPCCPU *fcpu = POWERPC_CPU(first_cpu);
> +    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(fcpu);
> +
> +    if (!pcc->interrupts_big_endian(fcpu)) {
> +        cpu->env.spr[SPR_LPCR] |= LPCR_ILE;
> +    }
> +}
> +
>  static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
>                             uint32_t token, uint32_t nargs,
>                             target_ulong args,
> @@ -197,6 +219,8 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
>          env->nip = start;
>          env->gpr[3] = r3;
>          cs->halted = 0;
> +        spapr_cpu_set_endianness(cpu);
> +        spapr_cpu_update_tb_offset(cpu);
>  
>          qemu_cpu_kick(cs);
>  
> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
> index a6956c0..619db98 100644
> --- a/include/hw/ppc/spapr.h
> +++ b/include/hw/ppc/spapr.h
> @@ -588,6 +588,8 @@ void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
>  void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
>                                            uint32_t count);
>  void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp);
> +void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
> +                                    sPAPRMachineState *spapr);
>  
>  /* rtas-configure-connector state */
>  struct sPAPRConfigureConnectorState {
> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
> index f08f291..165af7c 100644
> --- a/include/hw/ppc/spapr_cpu_core.h
> +++ b/include/hw/ppc/spapr_cpu_core.h
> @@ -63,4 +63,6 @@ typedef struct POWER8sPAPRCPUCore {
>  void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
>                           Error **errp);
>  char *spapr_get_cpu_core_type(const char *model);
> +void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
> +                     Error **errp);
>  #endif



