[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 7/8] x86: Support XFD and AMX xsave data migration
From: David Edmondson
Subject: Re: [PATCH v2 7/8] x86: Support XFD and AMX xsave data migration
Date: Mon, 21 Feb 2022 13:30:25 +0000
User-agent: Gnus/5.13 (Gnus v5.13) Emacs/29.0.50 (gnu/linux)
On Wednesday, 2022-02-16 at 22:04:33 -0800, Yang Zhong wrote:
> From: Zeng Guang <guang.zeng@intel.com>
>
> XFD (eXtended Feature Disable) allows enabling a
> feature on xsave state while preventing specific
> user threads from using the feature.
>
> Support saving and restoring the XFD MSRs if CPUID.D.1.EAX[4]
> enumerates as valid. Likewise, migrate the MSRs and
> related xsave state as necessary.
>
> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> Signed-off-by: Yang Zhong <yang.zhong@intel.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
> ---
> target/i386/cpu.h | 9 +++++++++
> target/i386/kvm/kvm.c | 18 ++++++++++++++++++
> target/i386/machine.c | 42 ++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 69 insertions(+)
>
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index de9da38e42..509c16323a 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -505,6 +505,9 @@ typedef enum X86Seg {
>
> #define MSR_VM_HSAVE_PA 0xc0010117
>
> +#define MSR_IA32_XFD 0x000001c4
> +#define MSR_IA32_XFD_ERR 0x000001c5
> +
> #define MSR_IA32_BNDCFGS 0x00000d90
> #define MSR_IA32_XSS 0x00000da0
> #define MSR_IA32_UMWAIT_CONTROL 0xe1
> @@ -873,6 +876,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
> #define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
> /* AVX512 BFloat16 Instruction */
> #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
> +/* XFD Extend Feature Disabled */
> +#define CPUID_D_1_EAX_XFD (1U << 4)
>
> /* Packets which contain IP payload have LIP values */
> #define CPUID_14_0_ECX_LIP (1U << 31)
> @@ -1617,6 +1622,10 @@ typedef struct CPUX86State {
> uint64_t msr_rtit_cr3_match;
> uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
>
> + /* Per-VCPU XFD MSRs */
> + uint64_t msr_xfd;
> + uint64_t msr_xfd_err;
> +
> /* exception/interrupt handling */
> int error_code;
> int exception_is_int;
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index ff064e3d8f..3dd24b6b0e 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -3275,6 +3275,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
> env->msr_ia32_sgxlepubkeyhash[3]);
> }
>
> + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
> + kvm_msr_entry_add(cpu, MSR_IA32_XFD,
> + env->msr_xfd);
> + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
> + env->msr_xfd_err);
> + }
> +
> /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
> * kvm_put_msr_feature_control. */
> }
> @@ -3667,6 +3674,11 @@ static int kvm_get_msrs(X86CPU *cpu)
> kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
> }
>
> + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
> + kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
> + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
> + }
> +
> ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
> if (ret < 0) {
> return ret;
> @@ -3963,6 +3975,12 @@ static int kvm_get_msrs(X86CPU *cpu)
> env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0]
> =
> msrs[i].data;
> break;
> + case MSR_IA32_XFD:
> + env->msr_xfd = msrs[i].data;
> + break;
> + case MSR_IA32_XFD_ERR:
> + env->msr_xfd_err = msrs[i].data;
> + break;
> }
> }
>
> diff --git a/target/i386/machine.c b/target/i386/machine.c
> index 6202f47793..1f9d0c46f1 100644
> --- a/target/i386/machine.c
> +++ b/target/i386/machine.c
> @@ -1483,6 +1483,46 @@ static const VMStateDescription vmstate_pdptrs = {
> }
> };
>
> +static bool xfd_msrs_needed(void *opaque)
> +{
> + X86CPU *cpu = opaque;
> + CPUX86State *env = &cpu->env;
> +
> + return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD);
> +}
> +
> +static const VMStateDescription vmstate_msr_xfd = {
> + .name = "cpu/msr_xfd",
> + .version_id = 1,
> + .minimum_version_id = 1,
> + .needed = xfd_msrs_needed,
> + .fields = (VMStateField[]) {
> + VMSTATE_UINT64(env.msr_xfd, X86CPU),
> + VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
> + VMSTATE_END_OF_LIST()
> + }
> +};
> +
> +static bool amx_xtile_needed(void *opaque)
> +{
> + X86CPU *cpu = opaque;
> + CPUX86State *env = &cpu->env;
> +
> + return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE);
> +}
> +
> +static const VMStateDescription vmstate_amx_xtile = {
> + .name = "cpu/intel_amx_xtile",
> + .version_id = 1,
> + .minimum_version_id = 1,
> + .needed = amx_xtile_needed,
> + .fields = (VMStateField[]) {
> + VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
> + VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
> + VMSTATE_END_OF_LIST()
> + }
> +};
> +
> const VMStateDescription vmstate_x86_cpu = {
> .name = "cpu",
> .version_id = 12,
> @@ -1622,6 +1662,8 @@ const VMStateDescription vmstate_x86_cpu = {
> &vmstate_msr_tsx_ctrl,
> &vmstate_msr_intel_sgx,
> &vmstate_pdptrs,
> + &vmstate_msr_xfd,
> + &vmstate_amx_xtile,
> NULL
> }
> };
dme.
--
All of us, we're going out tonight. We're gonna walk all over your cars.
- Re: [PATCH v2 2/8] x86: Add AMX XTILECFG and XTILEDATA components, (continued)
- [PATCH v2 3/8] x86: Grant AMX permission for guest, Yang Zhong, 2022/02/17
- [PATCH v2 4/8] x86: Add XFD faulting bit for state components, Yang Zhong, 2022/02/17
- [PATCH v2 5/8] x86: Add AMX CPUIDs enumeration, Yang Zhong, 2022/02/17
- [PATCH v2 6/8] x86: add support for KVM_CAP_XSAVE2 and AMX state migration, Yang Zhong, 2022/02/17
- [PATCH v2 7/8] x86: Support XFD and AMX xsave data migration, Yang Zhong, 2022/02/17
- Re: [PATCH v2 7/8] x86: Support XFD and AMX xsave data migration,
David Edmondson <=
- [PATCH v2 8/8] linux-header: Sync the linux headers, Yang Zhong, 2022/02/17