Re: [PATCH v2 3/8] x86: Grant AMX permission for guest
From: Paolo Bonzini
Subject: Re: [PATCH v2 3/8] x86: Grant AMX permission for guest
Date: Thu, 17 Feb 2022 14:44:10 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Thunderbird/91.5.0
On 2/17/22 06:58, Yang Zhong wrote:
> +
> +    if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
> +        bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
> +        if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
> Paolo, last time you suggested the following change here:
>
>     rc = kvm_arch_get_supported_cpuid(s, 0xd, 0,
>                                       (xdata_bit < 32 ? R_EAX : R_EDX));
>     if (!(rc & BIT(xdata_bit & 31))) {
>         ...
>     }
>
> Since I used "mask" as the parameter here, I had to use R_EAX directly.
> Please review, and if it should be changed to something like
> "(xdata_bit < 32 ? R_EAX : R_EDX)", I will do that in the next version, thanks!
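For reference, that earlier suggestion amounts to something like the helper
below (a hypothetical sketch, not taken from either patch; it assumes QEMU's
BIT() macro and the usual kvm_arch_get_supported_cpuid() signature):

static bool xsave_component_supported(KVMState *s, int bit)
{
    /*
     * CPUID[EAX=0xD,ECX=0] reports the supported XCR0 bits in EAX (0..31)
     * and EDX (32..63), so pick the register from the bit index.
     */
    uint32_t reg = kvm_arch_get_supported_cpuid(s, 0xd, 0,
                                                bit < 32 ? R_EAX : R_EDX);
    return reg & BIT(bit & 31);
}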
I looked at this function more closely because it also failed to compile on
non-Linux systems. I think it's better to write it now so that it already
plans for more dynamic features. In the code below, I'm also relying on
KVM_GET_SUPPORTED_CPUID/KVM_X86_COMP_GUEST_SUPP being queried before
ARCH_REQ_XCOMP_GUEST_PERM, so the latter cannot fail.
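The kernel side of that contract can be exercised with a tiny standalone
program (a minimal sketch, assuming an x86-64 Linux >= 5.17 host with AMX;
the prctl numbers are the same ones used in the patch, and XTILEDATA is
XSTATE feature bit 18):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
#define XSTATE_XTILE_DATA_BIT     18

int main(void)
{
    uint64_t perm = 0;

    /* Ask the kernel to permit XTILEDATA in guest XSTATE for this process. */
    if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, XSTATE_XTILE_DATA_BIT)) {
        perror("ARCH_REQ_XCOMP_GUEST_PERM");   /* old kernel or no AMX */
        return 1;
    }

    /* Read back the granted guest permission bitmap. */
    if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &perm)) {
        perror("ARCH_GET_XCOMP_GUEST_PERM");
        return 1;
    }

    printf("guest xcomp permissions: 0x%llx\n", (unsigned long long)perm);
    return 0;
}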
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 377d993438..1d0c006077 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -43,8 +43,6 @@
#include "disas/capstone.h"
#include "cpu-internal.h"
-#include <sys/syscall.h>
-
/* Helpers for building CPUID[2] descriptors: */
struct CPUID2CacheDescriptorInfo {
@@ -6002,40 +6000,6 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
}
}
-static void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
-{
-    KVMState *s = kvm_state;
-    uint64_t bitmask;
-    long rc;
-
-    if ((mask & XSTATE_XTILE_DATA_MASK) == XSTATE_XTILE_DATA_MASK) {
-        bitmask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
-        if (!(bitmask & XSTATE_XTILE_DATA_MASK)) {
-            warn_report("no amx support from supported_xcr0, "
-                        "bitmask:0x%lx", bitmask);
-            return;
-        }
-
-        rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
-                     XSTATE_XTILE_DATA_BIT);
-        if (rc) {
-            /*
-             * The older kernel version(<5.15) can't support
-             * ARCH_REQ_XCOMP_GUEST_PERM and directly return.
-             */
-            return;
-        }
-
-        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
-        if (rc) {
-            warn_report("prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
-        } else if (!(bitmask & XFEATURE_XTILE_MASK)) {
-            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
-                        "and bitmask=0x%lx", bitmask);
-        }
-    }
-}
-
/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d4ad0f56bd..de949bd63d 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -551,11 +551,8 @@ typedef enum X86Seg {
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)
-#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK \
- | XSTATE_XTILE_DATA_MASK)
-#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
-#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
+#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)
#define ESA_FEATURE_ALIGN64_BIT 1
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 3bdcd724c4..4b07778970 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -17,6 +17,7 @@
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
+#include <sys/syscall.h>
#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
@@ -5168,3 +5169,39 @@ bool kvm_arch_cpu_check_are_resettable(void)
{
return !sev_es_enabled();
}
+
+#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
+
+void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
+{
+    KVMState *s = kvm_state;
+    uint64_t supported;
+
+    mask &= XSTATE_DYNAMIC_MASK;
+    if (!mask) {
+        return;
+    }
+    /*
+     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
+     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
+     * about them already because they are not supported features.
+     */
+    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
+    mask &= supported;
+
+    while (mask) {
+        int bit = ctz64(mask);
+        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
+        if (rc) {
+            /*
+             * Older kernel versions (<5.17) do not support
+             * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
+             * any dynamic feature from kvm_arch_get_supported_cpuid.
+             */
+            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
+                        "for feature bit %d", bit);
+        }
+        mask &= ~BIT_ULL(bit);
+    }
+}
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index a978509d50..4124912c20 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -52,5 +52,6 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
bool kvm_enable_sgx_provisioning(KVMState *s);
+void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
#endif
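For completeness, I'd expect the call site in x86_cpu_enable_xsave_components()
to look roughly like this (a sketch of my assumption about the rest of the
series, not part of the diff above):

    /* After the XSAVE component mask has been built from the feature flags. */
    if (kvm_enabled()) {
        kvm_request_xsave_components(cpu, mask);
    }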
If this works, the rest of the series is good to go!
Thanks,
Paolo