[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 15/18] i386: Use CPUCacheInfo.share_level to encode CPUID[4].EAX[
From: |
Zhao Liu |
Subject: |
[PATCH 15/18] i386: Use CPUCacheInfo.share_level to encode CPUID[4].EAX[bits 25:14] |
Date: |
Thu, 2 Feb 2023 17:49:26 +0800 |
From: Zhao Liu <zhao1.liu@intel.com>
CPUID[4].EAX[bits 25:14] is used to represent the cache topology for
Intel CPUs.
After cache models have topology information, we can use
CPUCacheInfo.share_level to decide which topology level to be encoded
into CPUID[4].EAX[bits 25:14].
Additionally, since maximum_processor_id (originally "num_apic_ids") is
parsed based on the CPU topology levels, which are verified when parsing
smp, there is no need to check this value again with
"assert(num_apic_ids > 0)", so remove that assert.
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
---
target/i386/cpu.c | 55 +++++++++++++++++++++++++++++++----------------
1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 364534e84b1b..96ef96860604 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -231,22 +231,50 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo
*cache)
((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
0 /* Invalid value */)
+static uint32_t max_processor_ids_for_cache(CPUCacheInfo *cache,
+ X86CPUTopoInfo *topo_info)
+{
+ uint32_t num_ids = 0;
+
+ switch (cache->share_level) {
+ case CORE:
+ num_ids = 1 << apicid_core_offset(topo_info);
+ break;
+ case DIE:
+ num_ids = 1 << apicid_die_offset(topo_info);
+ break;
+ default:
+ /*
+ * Currently there is no use case for SMT, MODULE and PACKAGE, so use
+ * assert directly to facilitate debugging.
+ */
+ g_assert_not_reached();
+ }
+
+ return num_ids - 1;
+}
+
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+ uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+ apicid_core_offset(topo_info));
+ return num_cores - 1;
+}
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
- int num_apic_ids, int num_cores,
+ X86CPUTopoInfo *topo_info,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
assert(cache->size == cache->line_size * cache->associativity *
cache->partitions * cache->sets);
- assert(num_apic_ids > 0);
*eax = CACHE_TYPE(cache->type) |
CACHE_LEVEL(cache->level) |
(cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
- ((num_cores - 1) << 26) |
- ((num_apic_ids - 1) << 14);
+ (max_core_ids_in_package(topo_info) << 26) |
+ (max_processor_ids_for_cache(cache, topo_info) << 14);
assert(cache->line_size > 0);
assert(cache->partitions > 0);
@@ -5335,38 +5363,27 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
uint32_t count,
*eax = *ebx = *ecx = *edx = 0;
} else {
*eax = 0;
- int addressable_cores_offset = apicid_pkg_offset(&topo_info) -
- apicid_core_offset(&topo_info);
- int core_offset, die_offset;
switch (count) {
case 0: /* L1 dcache info */
- core_offset = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- (1 << core_offset),
- (1 << addressable_cores_offset),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
- core_offset = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- (1 << core_offset),
- (1 << addressable_cores_offset),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
- core_offset = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- (1 << core_offset),
- (1 << addressable_cores_offset),
+ &topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
- die_offset = apicid_die_offset(&topo_info);
if (cpu->enable_l3_cache) {
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- (1 << die_offset),
- (1 << addressable_cores_offset),
+ &topo_info,
eax, ebx, ecx, edx);
break;
}
--
2.34.1
- [PATCH 04/18] i386/cpu: Fix number of addressable IDs in CPUID.04H, (continued)
- [PATCH 04/18] i386/cpu: Fix number of addressable IDs in CPUID.04H, Zhao Liu, 2023/02/02
- [PATCH 05/18] i386/cpu: Consolidate the use of topo_info in cpu_x86_cpuid(), Zhao Liu, 2023/02/02
- [PATCH 06/18] i386: Introduce module-level cpu topology to CPUX86State, Zhao Liu, 2023/02/02
- [PATCH 07/18] i386: Support modules_per_die in X86CPUTopoInfo, Zhao Liu, 2023/02/02
- [PATCH 09/18] i386: Fix comment style in topology.h, Zhao Liu, 2023/02/02
- [PATCH 10/18] i386: Update APIC ID parsing rule to support module level, Zhao Liu, 2023/02/02
- [PATCH 08/18] i386: Support module_id in X86CPUTopoIDs, Zhao Liu, 2023/02/02
- [PATCH 11/18] i386/cpu: Introduce cluster-id to X86CPU, Zhao Liu, 2023/02/02
- [PATCH 12/18] tests: Add test case of APIC ID for module level parsing, Zhao Liu, 2023/02/02
- [PATCH 13/18] hw/i386/pc: Support smp.clusters for x86 PC machine, Zhao Liu, 2023/02/02
- [PATCH 15/18] i386: Use CPUCacheInfo.share_level to encode CPUID[4].EAX[bits 25:14],
Zhao Liu <=
- [PATCH 16/18] i386: Fix NumSharingCache for CPUID[0x8000001D].EAX[bits 25:14], Zhao Liu, 2023/02/02
- [PATCH 14/18] i386: Add cache topology info in CPUCacheInfo, Zhao Liu, 2023/02/02
- [PATCH 17/18] i386: Use CPUCacheInfo.share_level to encode CPUID[0x8000001D].EAX[bits 25:14], Zhao Liu, 2023/02/02
- [PATCH 18/18] i386: Add new property to control L2 cache topo in CPUID.04H, Zhao Liu, 2023/02/02