
[v10,04/50] x86/cpufeatures: Add SEV-SNP CPU feature

Message ID 20231016132819.1002933-5-michael.roth@amd.com
State New
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Oct. 16, 2023, 1:27 p.m. UTC
From: Brijesh Singh <brijesh.singh@amd.com>

Add CPU feature detection for Secure Encrypted Virtualization with
Secure Nested Paging. This feature adds a strong memory integrity
protection to help prevent malicious hypervisor-based attacks like
data replay, memory re-mapping, and more.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Jarkko Sakkinen <jarkko@profian.com>
Signed-off-by: Ashish Kalra <Ashish.Kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/include/asm/cpufeatures.h       | 1 +
 arch/x86/kernel/cpu/amd.c                | 5 +++--
 tools/arch/x86/include/asm/cpufeatures.h | 1 +
 3 files changed, 5 insertions(+), 2 deletions(-)
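
For reference, a minimal user-space sketch of how the CPUID bit behind this
feature flag can be probed. The leaf and bit positions (CPUID Fn8000_001F
EAX: bit 1 = SEV, bit 3 = SEV-ES, bit 4 = SEV-SNP) are an assumption of this
sketch matching the word-19 definitions in the patch, not something the patch
itself defines:

/* sev-probe.c: illustrative only, build with "gcc -o sev-probe sev-probe.c" */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x8000001F EAX carries the SEV capability bits (word 19 above). */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001F not available");
		return 1;
	}

	printf("SEV:     %s\n", (eax & (1u << 1)) ? "yes" : "no");
	printf("SEV-ES:  %s\n", (eax & (1u << 3)) ? "yes" : "no");
	printf("SEV-SNP: %s\n", (eax & (1u << 4)) ? "yes" : "no");
	return 0;
}

Note that the flag added here can still be cleared by the kernel even when the
CPUID bit is set (see the clear_sev path in the diff), so /proc/cpuinfo and
raw CPUID output may legitimately disagree.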

Comments

Paolo Bonzini Dec. 13, 2023, 12:51 p.m. UTC | #1
On 10/16/23 15:27, Michael Roth wrote:
> From: Brijesh Singh <brijesh.singh@amd.com>
> 
> Add CPU feature detection for Secure Encrypted Virtualization with
> Secure Nested Paging. This feature adds a strong memory integrity
> protection to help prevent malicious hypervisor-based attacks like
> data replay, memory re-mapping, and more.
> 
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> Signed-off-by: Jarkko Sakkinen <jarkko@profian.com>
> Signed-off-by: Ashish Kalra <Ashish.Kalra@amd.com>
> Signed-off-by: Michael Roth <michael.roth@amd.com>

Queued, thanks.

Paolo

> ---
>   arch/x86/include/asm/cpufeatures.h       | 1 +
>   arch/x86/kernel/cpu/amd.c                | 5 +++--
>   tools/arch/x86/include/asm/cpufeatures.h | 1 +
>   3 files changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index 58cb9495e40f..1640cedd77f1 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -437,6 +437,7 @@
>   #define X86_FEATURE_SEV			(19*32+ 1) /* AMD Secure Encrypted Virtualization */
>   #define X86_FEATURE_VM_PAGE_FLUSH	(19*32+ 2) /* "" VM Page Flush MSR is supported */
>   #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
> +#define X86_FEATURE_SEV_SNP		(19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
>   #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
>   #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
>   #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index dd8379d84445..14ee7f750cc7 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -630,8 +630,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
>   	 *	      SME feature (set in scattered.c).
>   	 *	      If the kernel has not enabled SME via any means then
>   	 *	      don't advertise the SME feature.
> -	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
> -	 *            SEV and SEV_ES feature (set in scattered.c).
> +	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
> +	 *	      any additional functionality based on it.
>   	 *
>   	 *   In all cases, since support for SME and SEV requires long mode,
>   	 *   don't advertise the feature under CONFIG_X86_32.
> @@ -666,6 +666,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
>   clear_sev:
>   		setup_clear_cpu_cap(X86_FEATURE_SEV);
>   		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
> +		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
>   	}
>   }
>   
> diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
> index 798e60b5454b..669f45eefa0c 100644
> --- a/tools/arch/x86/include/asm/cpufeatures.h
> +++ b/tools/arch/x86/include/asm/cpufeatures.h
> @@ -432,6 +432,7 @@
>   #define X86_FEATURE_SEV			(19*32+ 1) /* AMD Secure Encrypted Virtualization */
>   #define X86_FEATURE_VM_PAGE_FLUSH	(19*32+ 2) /* "" VM Page Flush MSR is supported */
>   #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
> +#define X86_FEATURE_SEV_SNP		(19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
>   #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
>   #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
>   #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */
Borislav Petkov Dec. 13, 2023, 1:13 p.m. UTC | #2
On Wed, Dec 13, 2023 at 01:51:58PM +0100, Paolo Bonzini wrote:
> On 10/16/23 15:27, Michael Roth wrote:
> > From: Brijesh Singh <brijesh.singh@amd.com>
> > 
> > Add CPU feature detection for Secure Encrypted Virtualization with
> > Secure Nested Paging. This feature adds a strong memory integrity
> > protection to help prevent malicious hypervisor-based attacks like
> > data replay, memory re-mapping, and more.
> > 
> > Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> > Signed-off-by: Jarkko Sakkinen <jarkko@profian.com>
> > Signed-off-by: Ashish Kalra <Ashish.Kalra@amd.com>
> > Signed-off-by: Michael Roth <michael.roth@amd.com>
> 
> Queued, thanks.

Paolo, please stop queueing x86 patches through your tree. I'll give you
an immutable branch with the x86 bits when the stuff has been reviewed.

Thx.
Paolo Bonzini Dec. 13, 2023, 1:31 p.m. UTC | #3
On 12/13/23 14:13, Borislav Petkov wrote:
> On Wed, Dec 13, 2023 at 01:51:58PM +0100, Paolo Bonzini wrote:
>> On 10/16/23 15:27, Michael Roth wrote:
>>> From: Brijesh Singh <brijesh.singh@amd.com>
>>>
>>> Add CPU feature detection for Secure Encrypted Virtualization with
>>> Secure Nested Paging. This feature adds a strong memory integrity
>>> protection to help prevent malicious hypervisor-based attacks like
>>> data replay, memory re-mapping, and more.
>>>
>>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>>> Signed-off-by: Jarkko Sakkinen <jarkko@profian.com>
>>> Signed-off-by: Ashish Kalra <Ashish.Kalra@amd.com>
>>> Signed-off-by: Michael Roth <michael.roth@amd.com>
>>
>> Queued, thanks.
> 
> Paolo, please stop queueing x86 patches through your tree. I'll give you
> an immutable branch with the x86 bits when the stuff has been reviewed.

Sure, I only queued it because you gave Acked-by for 05/50 and this is 
an obvious dependency.  I would like to get things in as they are ready 
(whenever it makes sense), so if you want to include those two in the 
x86 tree for 6.8, that would work for me.

Paolo
Borislav Petkov Dec. 13, 2023, 1:36 p.m. UTC | #4
On Wed, Dec 13, 2023 at 02:31:05PM +0100, Paolo Bonzini wrote:
> Sure, I only queued it because you gave Acked-by for 05/50 and this is an
> obvious dependency.  I would like to get things in as they are ready
> (whenever it makes sense), so if you want to include those two in the x86
> tree for 6.8, that would work for me.

It doesn't make sense to include them in 6.8 because the two alone are
simply dead code in 6.8.

The plan is to put the x86 patches first in the next submission, I'll
pick them up for 6.9 and then give you an immutable branch to apply the
KVM bits ontop. This way it all goes together.

Thx.
Paolo Bonzini Dec. 13, 2023, 1:40 p.m. UTC | #5
On 12/13/23 14:36, Borislav Petkov wrote:
>> Sure, I only queued it because you gave Acked-by for 05/50 and this is an
>> obvious dependency.  I would like to get things in as they are ready
>> (whenever it makes sense), so if you want to include those two in the x86
>> tree for 6.8, that would work for me.
>
> It doesn't make sense to include them in 6.8 because the two alone are
> simply dead code in 6.8.

Why are they dead code?  X86_FEATURE_SEV_SNP is set automatically based 
on CPUID, therefore patch 5 is a performance improvement on all 
processors that support SEV-SNP.  This is independent of whether KVM can 
create SEV-SNP guests or not.

If this is wrong, there is a problem in the commit messages.

Paolo

> The plan is to put the x86 patches first in the next submission, I'll
> pick them up for 6.9 and then give you an immutable branch to apply the
> KVM bits ontop. This way it all goes together.
Borislav Petkov Dec. 13, 2023, 1:49 p.m. UTC | #6
On Wed, Dec 13, 2023 at 02:40:24PM +0100, Paolo Bonzini wrote:
> Why are they dead code?  X86_FEATURE_SEV_SNP is set automatically based on
> CPUID, therefore patch 5 is a performance improvement on all processors that
> support SEV-SNP.  This is independent of whether KVM can create SEV-SNP
> guests or not.

No, it is not. This CPUID bit means:

"RMP table can be enabled to protect memory even from hypervisor."

Without the SNP host patches, it is dead code.

And regardless, arch/x86/kvm/ patches go through the kvm tree. The rest
of arch/x86/ through the tip tree. We've been over this a bunch of times
already.

If you don't agree with this split, let's discuss it offlist with all
tip and kvm maintainers, reach an agreement who picks up what and to put
an end to this nonsense.

Thx.
Paolo Bonzini Dec. 13, 2023, 2:18 p.m. UTC | #7
On 12/13/23 14:49, Borislav Petkov wrote:
> On Wed, Dec 13, 2023 at 02:40:24PM +0100, Paolo Bonzini wrote:
>> Why are they dead code?  X86_FEATURE_SEV_SNP is set automatically based on
>> CPUID, therefore patch 5 is a performance improvement on all processors that
>> support SEV-SNP.  This is independent of whether KVM can create SEV-SNP
>> guests or not.
> 
> No, it is not. This CPUID bit means:
> 
> "RMP table can be enabled to protect memory even from hypervisor."
> 
> Without the SNP host patches, it is dead code.

-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
+	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {

Surely we can agree that cpu_feature_enabled(X86_FEATURE_SEV_SNP) has nothing
to do with SEV-SNP host patches being present?  And that therefore retpolines
are preferred even without any SEV-SNP support in KVM?

And can we agree that "Acked-by" means "feel free and take it if you wish,
I don't care enough to merge it through my tree or provide a topic branch"?

I'm asking because I'm not sure if we agree on these two things, but they
really seem basic to me?

Paolo

> And regardless, arch/x86/kvm/ patches go through the kvm tree. The rest
> of arch/x86/ through the tip tree. We've been over this a bunch of times
> already.


> If you don't agree with this split, let's discuss it offlist with all
> tip and kvm maintainers, reach an agreement who picks up what and to put
> an end to this nonsense.
> 
> Thx.
>
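
As a reference for the condition being argued over, here is a rough standalone
restatement of the patch-5 hunk Paolo quotes in #7. The three booleans are
hypothetical stand-ins for the kernel's ARCH_CAP_IBRS_ALL,
cpu_has(c, X86_FEATURE_AUTOIBRS) and cpu_feature_enabled(X86_FEATURE_SEV_SNP)
checks, so this sketches the logic rather than the actual kernel code:

#include <stdbool.h>
#include <stdio.h>

/*
 * Restates the condition from the quoted hunk: with ARCH_CAP_IBRS_ALL absent,
 * AutoIBRS only counts when SEV-SNP is not enabled, so an SNP host falls back
 * to retpolines (the point Paolo raises above).
 */
static bool treat_as_enhanced_ibrs(bool ibrs_all, bool autoibrs, bool snp_enabled)
{
	return ibrs_all || (autoibrs && !snp_enabled);
}

int main(void)
{
	/* AutoIBRS-capable host with SNP enabled: not treated as eIBRS (prints 0). */
	printf("%d\n", treat_as_enhanced_ibrs(false, true, true));
	/* Same host without SNP: AutoIBRS is used as before (prints 1). */
	printf("%d\n", treat_as_enhanced_ibrs(false, true, false));
	return 0;
}

Whether the feature bit means "CPUID-present" or "actually enabled" is exactly
what the following messages argue about.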
Borislav Petkov Dec. 13, 2023, 3:41 p.m. UTC | #8
On Wed, Dec 13, 2023 at 03:18:17PM +0100, Paolo Bonzini wrote:
> Surely we can agree that cpu_feature_enabled(X86_FEATURE_SEV_SNP) has nothing
> to do with SEV-SNP host patches being present? 

It does - we're sanitizing the meaning of a CPUID flag present in
/proc/cpuinfo, see here:

https://git.kernel.org/tip/79c603ee43b2674fba0257803bab265147821955

> And that therefore retpolines are preferred even without any SEV-SNP
> support in KVM?

No, automatic IBRS should be disabled when SNP is enabled. Not CPUID
present - enabled. We clear that bit on a couple of occasions in the SNP
host patchset if we determine that SNP host support is not possible so
4/50 needs to go together with the rest to mean something.

> And can we agree that "Acked-by" means "feel free and take it if you wish,

I can see how it can mean that and I'm sorry for the misunderstanding
I caused. Two things here:

* I acked it because I did some lengthy digging internally on whether
disabling AIBRS makes sense on SNP and this was a note more to myself to
say, yes, that's a good change.

* If I wanted you to pick it up, I would've acked 4/50 too. Which
I haven't.

> I'm asking because I'm not sure if we agree on these two things, but they
> really seem basic to me?

I think KVM and x86 maintainers should sit down and discuss who picks up
what and through which tree so that there's no more confusion in the
future. It seems things need discussion...

Thx.
Paolo Bonzini Dec. 13, 2023, 5:35 p.m. UTC | #9
On 12/13/23 16:41, Borislav Petkov wrote:
> On Wed, Dec 13, 2023 at 03:18:17PM +0100, Paolo Bonzini wrote:
>> Surely we can agree that cpu_feature_enabled(X86_FEATURE_SEV_SNP) has nothing
>> to do with SEV-SNP host patches being present?
> 
> It does - we're sanitizing the meaning of a CPUID flag present in
> /proc/cpuinfo, see here:
> 
> https://git.kernel.org/tip/79c603ee43b2674fba0257803bab265147821955
>
>> And that therefore retpolines are preferred even without any SEV-SNP
>> support in KVM?
> 
> No, automatic IBRS should be disabled when SNP is enabled. Not CPUID
> present - enabled.

Ok, so the root cause of the problem is commit message/patch ordering:

1) patch 4 should have unconditionally cleared the feature (until the 
initialization code comes around in patch 6); and it should have 
mentioned in the commit message that we don't want X86_FEATURE_SEV_SNP 
to be set unless SNP can be enabled via MSR_AMD64_SYSCFG (see the 
sketch after this list).

2) possibly, the commit message of patch 5 could have said something 
like "at this point in the kernel SNP is never enabled".

3) Patch 23 should have been placed before the SNP initialization, 
because as things stand the patches (mildly) break bisectability.
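
A rough user-space sketch of the SYSCFG check implied in point 1 above. The
MSR number (0xC0010010) and the SNP_EN bit position (bit 24) are assumptions
of this sketch taken from public AMD documentation rather than from this
series, and reading the MSR this way needs root plus the msr module:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_SYSCFG	0xc0010010
#define SYSCFG_SNP_EN		(1ULL << 24)	/* assumed bit position */

int main(void)
{
	uint64_t syscfg;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr device maps the MSR index to the file offset. */
	if (pread(fd, &syscfg, sizeof(syscfg), MSR_AMD64_SYSCFG) != sizeof(syscfg)) {
		perror("read SYSCFG");
		close(fd);
		return 1;
	}
	close(fd);

	printf("SYSCFG=0x%016llx, SNP %s by firmware\n",
	       (unsigned long long)syscfg,
	       (syscfg & SYSCFG_SNP_EN) ? "enabled" : "not enabled");
	return 0;
}

In the kernel this would presumably be an rdmsrl() in the early setup path,
clearing X86_FEATURE_SEV_SNP when the bit is not set.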

> We clear that bit on a couple of occasions in the SNP
> host patchset if we determine that SNP host support is not possible so
> 4/50 needs to go together with the rest to mean something.

Understood now.  With the patch ordering and commit message edits I 
suggested above, indeed I would not have picked up patch 4.

But with your explanation, I would even say that "4/50 needs to go 
together with the rest" *for correctness*, not just to mean something.

Paolo
Borislav Petkov Dec. 13, 2023, 6:53 p.m. UTC | #10
On Wed, Dec 13, 2023 at 06:35:35PM +0100, Paolo Bonzini wrote:
> 1) patch 4 should have unconditionally cleared the feature (until the
> initialization code comes around in patch 6); and it should have mentioned
> in the commit message that we don't want X86_FEATURE_SEV_SNP to be set,
> unless SNP can be enabled via MSR_AMD64_SYSCFG.

I guess.

> 2) possibly, the commit message of patch 5 could have said something like
> "at this point in the kernel SNP is never enabled".

Sure.

> 3) Patch 23 should have been placed before the SNP initialization, because
> as things stand the patches (mildly) break bisectability.

Ok, I still haven't reached that one.

> Understood now.  With the patch ordering and commit message edits I
> suggested above, indeed I would not have picked up patch 4.

In the future, please simply refrain from picking up x86 patches if you
haven't gotten an explicit ACK.

We could have conflicting changes in tip, we could be reworking that
part of the code and the thing the patch touches could be completely
gone, and so on and so on...

Also, we want to have a full picture of what goes in. Exactly the same
as how you'd like to have a full picture of what goes into kvm and how
we don't apply kvm patches unless there's some extraordinary
circumstance or we have received an explicit ACK.
  
> But with your explanation, I would even say that "4/50 needs to go
> together with the rest" *for correctness*, not just to mean something.

Yes, but it would be easier for integration if they went in as two
groups - kvm and x86 bits. Not: some x86 bits first, then kvm bits
through your tree and then some more x86 bits. That would be
a logistical nightmare.

And even if you bisect and land at 4/50 and you disable AIBRS even
without SNP being really enabled, that is not a big deal - you're only
bisecting and not really using that kernel and it's not like it breaks
builds so...

Thx.

Patch

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 58cb9495e40f..1640cedd77f1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -437,6 +437,7 @@ 
 #define X86_FEATURE_SEV			(19*32+ 1) /* AMD Secure Encrypted Virtualization */
 #define X86_FEATURE_VM_PAGE_FLUSH	(19*32+ 2) /* "" VM Page Flush MSR is supported */
 #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SEV_SNP		(19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
 #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
 #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dd8379d84445..14ee7f750cc7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -630,8 +630,8 @@  static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 	 *	      SME feature (set in scattered.c).
 	 *	      If the kernel has not enabled SME via any means then
 	 *	      don't advertise the SME feature.
-	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
-	 *            SEV and SEV_ES feature (set in scattered.c).
+	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
+	 *	      any additional functionality based on it.
 	 *
 	 *   In all cases, since support for SME and SEV requires long mode,
 	 *   don't advertise the feature under CONFIG_X86_32.
@@ -666,6 +666,7 @@  static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 clear_sev:
 		setup_clear_cpu_cap(X86_FEATURE_SEV);
 		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
+		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
 	}
 }
 
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 798e60b5454b..669f45eefa0c 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -432,6 +432,7 @@ 
 #define X86_FEATURE_SEV			(19*32+ 1) /* AMD Secure Encrypted Virtualization */
 #define X86_FEATURE_VM_PAGE_FLUSH	(19*32+ 2) /* "" VM Page Flush MSR is supported */
 #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SEV_SNP		(19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
 #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
 #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */