Message ID | 20211229131328.12283-11-yang.zhong@intel.com |
---|---|
State | Superseded |
Headers | show |
Series | AMX Support in KVM | expand |
On Wed, Dec 29, 2021, Yang Zhong wrote: > From: Jing Liu <jing2.liu@intel.com> > > Intel's eXtended Feature Disable (XFD) feature allows the software > to dynamically adjust fpstate buffer size for XSAVE features which > have large state. > > Because fpstate has been expanded for all possible dynamic xstates > at KVM_SET_CPUID2, emulation of the IA32_XFD MSR is straightforward. > For write just call fpu_update_guest_xfd() to update the guest fpu > container once all the sanity checks are passed. For read then > return the cached value in the container. > > Signed-off-by: Zeng Guang <guang.zeng@intel.com> > Signed-off-by: Wei Wang <wei.w.wang@intel.com> > Signed-off-by: Jing Liu <jing2.liu@intel.com> > Signed-off-by: Yang Zhong <yang.zhong@intel.com> > --- > arch/x86/kvm/x86.c | 27 +++++++++++++++++++++++++++ > 1 file changed, 27 insertions(+) > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index e50e97ac4408..36677b754ac9 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -1359,6 +1359,7 @@ static const u32 msrs_to_save_all[] = { > MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, > MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, > MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, > + MSR_IA32_XFD, > }; > > static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; > @@ -3669,6 +3670,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 1; > vcpu->arch.msr_misc_features_enables = data; > break; > +#ifdef CONFIG_X86_64 > + case MSR_IA32_XFD: > + if (!msr_info->host_initiated && > + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) > + return 1; > + > + if (data & ~(XFEATURE_MASK_USER_DYNAMIC & > + vcpu->arch.guest_supported_xcr0)) > + return 1; > + > + fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); > + break; > +#endif > default: > if (kvm_pmu_is_valid_msr(vcpu, msr)) > return kvm_pmu_set_msr(vcpu, msr_info); > @@ -3989,6 +4003,15 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, 
struct msr_data *msr_info) > case MSR_K7_HWCR: > msr_info->data = vcpu->arch.msr_hwcr; > break; > +#ifdef CONFIG_X86_64 > + case MSR_IA32_XFD: > + if (!msr_info->host_initiated && > + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) > + return 1; > + > + msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; > + break; > +#endif > default: > if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) > return kvm_pmu_get_msr(vcpu, msr_info); > @@ -6422,6 +6445,10 @@ static void kvm_init_msr_list(void) > min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) > continue; > break; > + case MSR_IA32_XFD: > + if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) > + continue; I suspect the 32-bit host support is wrong. The kernel's handle_xfd_event() checks for 64-bit support in addition to the CPU feature itself, which implies that the feature can be reported in boot_cpu_data for 32-bit kernels. static bool handle_xfd_event(struct pt_regs *regs) { u64 xfd_err; int err; if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD)) return false; ... } In this specific case, that means KVM will tell userspace it needs to migrate MSR_IA32_XFD, and then reject attempts to read/write the MSR. If 32-bit host kernels do not explicitly suppress X86_FEATURE_XFD, then KVM needs to do: diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 556555537a18..156ce332d55b 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -455,9 +455,11 @@ void kvm_set_cpu_caps(void) #ifdef CONFIG_X86_64 unsigned int f_gbpages = F(GBPAGES); unsigned int f_lm = F(LM); + unsigned int f_xfd = F(XFD); #else unsigned int f_gbpages = 0; unsigned int f_lm = 0; + unsigned int f_xfd = 0; #endif memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); @@ -545,7 +547,7 @@ void kvm_set_cpu_caps(void) ); kvm_cpu_cap_mask(CPUID_D_1_EAX, - F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | F(XFD) + F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd ); kvm_cpu_cap_init_scattered(CPUID_12_EAX, > + break; > default: > break; > }
> From: Sean Christopherson <seanjc@google.com> > Sent: Wednesday, January 5, 2022 3:33 AM > > On Wed, Dec 29, 2021, Yang Zhong wrote: > > From: Jing Liu <jing2.liu@intel.com> > > > > Intel's eXtended Feature Disable (XFD) feature allows the software > > to dynamically adjust fpstate buffer size for XSAVE features which > > have large state. > > > > Because fpstate has been expanded for all possible dynamic xstates > > at KVM_SET_CPUID2, emulation of the IA32_XFD MSR is straightforward. > > For write just call fpu_update_guest_xfd() to update the guest fpu > > container once all the sanity checks are passed. For read then > > return the cached value in the container. > > > > Signed-off-by: Zeng Guang <guang.zeng@intel.com> > > Signed-off-by: Wei Wang <wei.w.wang@intel.com> > > Signed-off-by: Jing Liu <jing2.liu@intel.com> > > Signed-off-by: Yang Zhong <yang.zhong@intel.com> > > --- > > arch/x86/kvm/x86.c | 27 +++++++++++++++++++++++++++ > > 1 file changed, 27 insertions(+) > > > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > > index e50e97ac4408..36677b754ac9 100644 > > --- a/arch/x86/kvm/x86.c > > +++ b/arch/x86/kvm/x86.c > > @@ -1359,6 +1359,7 @@ static const u32 msrs_to_save_all[] = { > > MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, > MSR_F15H_PERF_CTL5, > > MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, > MSR_F15H_PERF_CTR2, > > MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, > MSR_F15H_PERF_CTR5, > > + MSR_IA32_XFD, > > }; > > > > static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; > > @@ -3669,6 +3670,19 @@ int kvm_set_msr_common(struct kvm_vcpu > *vcpu, struct msr_data *msr_info) > > return 1; > > vcpu->arch.msr_misc_features_enables = data; > > break; > > +#ifdef CONFIG_X86_64 > > + case MSR_IA32_XFD: > > + if (!msr_info->host_initiated && > > + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) > > + return 1; > > + > > + if (data & ~(XFEATURE_MASK_USER_DYNAMIC & > > + vcpu->arch.guest_supported_xcr0)) > > + return 1; > > + > > + fpu_update_guest_xfd(&vcpu->arch.guest_fpu, 
data); > > + break; > > +#endif > > default: > > if (kvm_pmu_is_valid_msr(vcpu, msr)) > > return kvm_pmu_set_msr(vcpu, msr_info); > > @@ -3989,6 +4003,15 @@ int kvm_get_msr_common(struct kvm_vcpu > *vcpu, struct msr_data *msr_info) > > case MSR_K7_HWCR: > > msr_info->data = vcpu->arch.msr_hwcr; > > break; > > +#ifdef CONFIG_X86_64 > > + case MSR_IA32_XFD: > > + if (!msr_info->host_initiated && > > + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) > > + return 1; > > + > > + msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; > > + break; > > +#endif > > default: > > if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) > > return kvm_pmu_get_msr(vcpu, msr_info); > > @@ -6422,6 +6445,10 @@ static void kvm_init_msr_list(void) > > min(INTEL_PMC_MAX_GENERIC, > x86_pmu.num_counters_gp)) > > continue; > > break; > > + case MSR_IA32_XFD: > > + if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) > > + continue; > > I suspect the 32-bit host support is wrong. The kernel's handle_xfd_event() > checks for 64-bit support in addition to the CPU feature itself, which implies > that the feature can be reported in boot_cpu_data for 32-bit kernels. > > static bool handle_xfd_event(struct pt_regs *regs) > { > u64 xfd_err; > int err; > > if (!IS_ENABLED(CONFIG_X86_64) > || !cpu_feature_enabled(X86_FEATURE_XFD)) > return false; > > ... > } > > In this specific case, that means KVM will tell userspace it needs to migrate > MSR_IA32_XFD, and then reject attempts to read/write the MSR. 
> > If 32-bit host kernels do not explicitly suppress X86_FEATURE_XFD, then KVM I didn't find an explicit suppression of it in the 32-bit host kernel. > needs > to do: > > diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c > index 556555537a18..156ce332d55b 100644 > --- a/arch/x86/kvm/cpuid.c > +++ b/arch/x86/kvm/cpuid.c > @@ -455,9 +455,11 @@ void kvm_set_cpu_caps(void) > #ifdef CONFIG_X86_64 > unsigned int f_gbpages = F(GBPAGES); > unsigned int f_lm = F(LM); > + unsigned int f_xfd = F(XFD); > #else > unsigned int f_gbpages = 0; > unsigned int f_lm = 0; > + unsigned int f_xfd = 0; > #endif > memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); > > @@ -545,7 +547,7 @@ void kvm_set_cpu_caps(void) > ); > > kvm_cpu_cap_mask(CPUID_D_1_EAX, > - F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | F(XFD) > + F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd > ); > > kvm_cpu_cap_init_scattered(CPUID_12_EAX, > so this change makes sense. We will incorporate it in the next version. Thanks Kevin
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e50e97ac4408..36677b754ac9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1359,6 +1359,7 @@ static const u32 msrs_to_save_all[] = { MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, + MSR_IA32_XFD, }; static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; @@ -3669,6 +3670,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.msr_misc_features_enables = data; break; +#ifdef CONFIG_X86_64 + case MSR_IA32_XFD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + if (data & ~(XFEATURE_MASK_USER_DYNAMIC & + vcpu->arch.guest_supported_xcr0)) + return 1; + + fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); + break; +#endif default: if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); @@ -3989,6 +4003,15 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_K7_HWCR: msr_info->data = vcpu->arch.msr_hwcr; break; +#ifdef CONFIG_X86_64 + case MSR_IA32_XFD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; + break; +#endif default: if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) return kvm_pmu_get_msr(vcpu, msr_info); @@ -6422,6 +6445,10 @@ static void kvm_init_msr_list(void) min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) continue; break; + case MSR_IA32_XFD: + if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) + continue; + break; default: break; }