From: Eduardo Habkost
Subject: [PULL 5/9] hw/i386: Move arch_id decode inside x86_cpus_init
Date: Thu, 2 Apr 2020 19:20:47 -0300

From: Babu Moger <address@hidden>

The APIC ID calculation for EPYC CPU models depends on knowing the
total number of NUMA nodes. Right now we calculate the arch_id while
parsing the NUMA configuration (parse_numa), at which point the total
number of NUMA nodes configured for the system is not yet known.
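
For context, the EPYC encoding gives the NUMA node ID its own
bit-field inside the APIC ID, so the width of that field (and hence
every APIC ID) is a function of the total node count. A minimal
sketch of that dependency, modeled on the bit-width helper in
include/hw/i386/topology.h (names here are illustrative, not the
actual QEMU API):

    #include <assert.h>

    /*
     * Sketch: number of bits needed to encode `count` distinct IDs.
     * The node_id field of an EPYC-style APIC ID is sized this way,
     * which is why the total NUMA node count must be known before
     * any APIC ID can be computed.
     */
    static unsigned sketch_bitwidth_for_count(unsigned count)
    {
        assert(count >= 1);
        count -= 1;                  /* IDs range over 0..count-1 */
        return count ? 32 - __builtin_clz(count) : 0;
    }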

Move the arch_id calculation into x86_cpus_init. By that point, SMP
parsing has completed and the NUMA node information is available.

Override the default topology handlers if use_epyc_apic_id_encoding
is enabled in the CPU model definition.

Also convert the callers to use the topology handlers stored in
X86MachineState (sketched after the '---' separator below).

Signed-off-by: Babu Moger <address@hidden>
Message-Id: <address@hidden>
Signed-off-by: Eduardo Habkost <address@hidden>
---
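
For reference, a rough sketch of the topology handler hooks on
X86MachineState that this patch relies on (they are added earlier in
this series). The field names match the assignments in the diff
below, but the signatures are approximated from the call sites, so
treat this as a sketch rather than the actual declaration in
include/hw/i386/x86.h:

    /* Approximate shape of the hooks used in the diff below. */
    struct X86MachineState {
        /* ... other machine state ... */
        uint32_t (*apicid_from_cpu_idx)(X86CPUTopoInfo *topo_info,
                                        unsigned cpu_index);
        void (*topo_ids_from_apicid)(uint32_t apicid,
                                     X86CPUTopoInfo *topo_info,
                                     X86CPUTopoIDs *topo_ids);
        uint32_t (*apicid_from_topo_ids)(X86CPUTopoInfo *topo_info,
                                         const X86CPUTopoIDs *topo_ids);
        uint32_t (*apicid_pkg_offset)(X86CPUTopoInfo *topo_info);
    };
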
 hw/i386/pc.c  |  6 +++---
 hw/i386/x86.c | 37 ++++++++++++++++++++++++++++++-------
 2 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 0bf0aaca52..b58925d063 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1580,14 +1580,14 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
         topo_ids.die_id = cpu->die_id;
         topo_ids.core_id = cpu->core_id;
         topo_ids.smt_id = cpu->thread_id;
-        cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
+        cpu->apic_id = x86ms->apicid_from_topo_ids(&topo_info, &topo_ids);
     }
 
     cpu_slot = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
     if (!cpu_slot) {
         MachineState *ms = MACHINE(pcms);
 
-        x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+        x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
         error_setg(errp,
             "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
             " APIC ID %" PRIu32 ", valid index range 0:%d",
@@ -1608,7 +1608,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
     /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
      * once -smp refactoring is complete and there will be CPU private
      * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
-    x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+    x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
     if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
         error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
             " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 2168fc56c0..b82770024c 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -68,6 +68,22 @@ inline void init_topo_info(X86CPUTopoInfo *topo_info,
     topo_info->threads_per_core = ms->smp.threads;
 }
 
+/*
+ * Set up with the new EPYC topology handlers
+ *
+ * AMD uses different apic id encoding for EPYC based cpus. Override
+ * the default topo handlers with EPYC encoding handlers.
+ */
+static void x86_set_epyc_topo_handlers(MachineState *machine)
+{
+    X86MachineState *x86ms = X86_MACHINE(machine);
+
+    x86ms->apicid_from_cpu_idx = x86_apicid_from_cpu_idx_epyc;
+    x86ms->topo_ids_from_apicid = x86_topo_ids_from_apicid_epyc;
+    x86ms->apicid_from_topo_ids = x86_apicid_from_topo_ids_epyc;
+    x86ms->apicid_pkg_offset = apicid_pkg_offset_epyc;
+}
+
 /*
  * Calculates initial APIC ID for a specific CPU index
  *
@@ -86,7 +102,7 @@ uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
 
     init_topo_info(&topo_info, x86ms);
 
-    correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index);
+    correct_id = x86ms->apicid_from_cpu_idx(&topo_info, cpu_index);
     if (x86mc->compat_apic_id_mode) {
         if (cpu_index != correct_id && !warned && !qtest_enabled()) {
             error_report("APIC IDs set in compatibility mode, "
@@ -121,6 +137,11 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
     MachineState *ms = MACHINE(x86ms);
     MachineClass *mc = MACHINE_GET_CLASS(x86ms);
 
+    /* Check for apicid encoding */
+    if (cpu_x86_use_epyc_apic_id_encoding(ms->cpu_type)) {
+        x86_set_epyc_topo_handlers(ms);
+    }
+
     x86_cpu_set_default_version(default_cpu_version);
 
     /*
@@ -134,6 +155,12 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
     x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
                                                       ms->smp.max_cpus - 1) + 1;
     possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+    for (i = 0; i < ms->possible_cpus->len; i++) {
+        ms->possible_cpus->cpus[i].arch_id =
+            x86_cpu_apic_id_from_index(x86ms, i);
+    }
+
     for (i = 0; i < ms->smp.cpus; i++) {
         x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
     }
@@ -158,8 +185,7 @@ int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
    init_topo_info(&topo_info, x86ms);
 
    assert(idx < ms->possible_cpus->len);
-   x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
-                            &topo_info, &topo_ids);
+   x86_topo_ids_from_idx(&topo_info, idx, &topo_ids);
    return topo_ids.pkg_id % ms->numa_state->num_nodes;
 }
 
@@ -190,10 +216,7 @@ const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
 
         ms->possible_cpus->cpus[i].type = ms->cpu_type;
         ms->possible_cpus->cpus[i].vcpus_count = 1;
-        ms->possible_cpus->cpus[i].arch_id =
-            x86_cpu_apic_id_from_index(x86ms, i);
-        x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
-                                 &topo_info, &topo_ids);
+        x86_topo_ids_from_idx(&topo_info, i, &topo_ids);
         ms->possible_cpus->cpus[i].props.has_socket_id = true;
         ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
         if (x86ms->smp_dies > 1) {
-- 
2.24.1



