
[Part2,v5,26/45] KVM: SVM: Mark the private vma unmerable for SEV-SNP guests

Message ID 20210820155918.7518-27-brijesh.singh@amd.com
State New
Series [Part2,v5,01/45] x86/cpufeatures: Add SEV-SNP CPU feature

Commit Message

Brijesh Singh Aug. 20, 2021, 3:58 p.m. UTC
When SEV-SNP is enabled, the guest private pages are added in the RMP
table; while adding the pages, the rmp_make_private() unmaps the pages
from the direct map. If KSM attempts to access those unmapped pages then
it will trigger #PF (page-not-present).

Encrypted guest pages cannot be shared between the process, so an
userspace should not mark the region mergeable but to be safe, mark the
process vma unmerable before adding the pages in the RMP table.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/kvm/svm/sev.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
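
The helper added by this patch uses ksm_madvise() inside the kernel to do for the guest's private memory what a process would otherwise request with madvise(2). As a point of reference only, a minimal userspace sketch of the equivalent request is below; the buffer size and mmap flags are chosen purely for illustration and are not part of the patch.

/*
 * Userspace sketch (illustration only, not part of this patch): ask KSM
 * to leave a mapping alone, the madvise(2) counterpart of the kernel's
 * ksm_madvise(..., MADV_UNMERGEABLE, ...) call in the diff below.
 */
#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* example: one 2M region */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* Undo any earlier MADV_MERGEABLE hint; may fail with EINVAL if KSM is not built in. */
	if (madvise(buf, len, MADV_UNMERGEABLE))
		return 1;

	return 0;
}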

Comments

Dr. David Alan Gilbert Sept. 23, 2021, 5:18 p.m. UTC | #1
* Brijesh Singh (brijesh.singh@amd.com) wrote:
> When SEV-SNP is enabled, the guest private pages are added in the RMP
> table; while adding the pages, the rmp_make_private() unmaps the pages
> from the direct map. If KSM attempts to access those unmapped pages then
> it will trigger #PF (page-not-present).
> 
> Encrypted guest pages cannot be shared between the process, so an
> userspace should not mark the region mergeable but to be safe, mark the
> process vma unmerable before adding the pages in the RMP table.
              ^^^^^^^^^

(and in the subject) -> unmergeable

> 
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/kvm/svm/sev.c | 32 ++++++++++++++++++++++++++++++++
>  1 file changed, 32 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 4b126598b7aa..dcef0ae5f8e4 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -18,11 +18,13 @@
>  #include <linux/processor.h>
>  #include <linux/trace_events.h>
>  #include <linux/sev.h>
> +#include <linux/ksm.h>
>  #include <asm/fpu/internal.h>
>  
>  #include <asm/pkru.h>
>  #include <asm/trapnr.h>
>  #include <asm/sev.h>
> +#include <asm/mman.h>
>  
>  #include "x86.h"
>  #include "svm.h"
> @@ -1683,6 +1685,30 @@ static bool is_hva_registered(struct kvm *kvm, hva_t hva, size_t len)
>  	return false;
>  }
>  
> +static int snp_mark_unmergable(struct kvm *kvm, u64 start, u64 size)
                       ^^^^^^^^^^
> +{
> +	struct vm_area_struct *vma;
> +	u64 end = start + size;

Do you need to worry about wrap there? (User supplied start/size?)

Dave

> +	int ret;
> +
> +	do {
> +		vma = find_vma_intersection(kvm->mm, start, end);
> +		if (!vma) {
> +			ret = -EINVAL;
> +			break;
> +		}
> +
> +		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> +				  MADV_UNMERGEABLE, &vma->vm_flags);
> +		if (ret)
> +			break;
> +
> +		start = vma->vm_end;
> +	} while (end > vma->vm_end);
> +
> +	return ret;
> +}
> +
>  static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
>  {
>  	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> @@ -1707,6 +1733,12 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
>  	if (!is_hva_registered(kvm, params.uaddr, params.len))
>  		return -EINVAL;
>  
> +	mmap_write_lock(kvm->mm);
> +	ret = snp_mark_unmergable(kvm, params.uaddr, params.len);
> +	mmap_write_unlock(kvm->mm);
> +	if (ret)
> +		return -EFAULT;
> +
>  	/*
>  	 * The userspace memory is already locked so technically we don't
>  	 * need to lock it again. Later part of the function needs to know
> -- 
> 2.17.1
> 
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
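
One possible way to handle the wrap that Dave asks about, sketched here as a hypothetical addition rather than anything posted in this series, is to reject a user-supplied start/size pair whose sum overflows before the VMA walk begins; check_add_overflow() from <linux/overflow.h> is the usual kernel helper for that.

/*
 * Hypothetical overflow guard (not part of the posted patch): reject a
 * range whose end wraps past the top of the address space before any
 * VMA is touched.
 */
#include <linux/types.h>
#include <linux/overflow.h>

static int snp_check_range(u64 start, u64 size)
{
	u64 end;

	/* check_add_overflow() returns true if start + size does not fit in a u64. */
	if (check_add_overflow(start, size, &end))
		return -EINVAL;

	return 0;
}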

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4b126598b7aa..dcef0ae5f8e4 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -18,11 +18,13 @@ 
 #include <linux/processor.h>
 #include <linux/trace_events.h>
 #include <linux/sev.h>
+#include <linux/ksm.h>
 #include <asm/fpu/internal.h>
 
 #include <asm/pkru.h>
 #include <asm/trapnr.h>
 #include <asm/sev.h>
+#include <asm/mman.h>
 
 #include "x86.h"
 #include "svm.h"
@@ -1683,6 +1685,30 @@  static bool is_hva_registered(struct kvm *kvm, hva_t hva, size_t len)
 	return false;
 }
 
+static int snp_mark_unmergable(struct kvm *kvm, u64 start, u64 size)
+{
+	struct vm_area_struct *vma;
+	u64 end = start + size;
+	int ret;
+
+	do {
+		vma = find_vma_intersection(kvm->mm, start, end);
+		if (!vma) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+				  MADV_UNMERGEABLE, &vma->vm_flags);
+		if (ret)
+			break;
+
+		start = vma->vm_end;
+	} while (end > vma->vm_end);
+
+	return ret;
+}
+
 static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -1707,6 +1733,12 @@  static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (!is_hva_registered(kvm, params.uaddr, params.len))
 		return -EINVAL;
 
+	mmap_write_lock(kvm->mm);
+	ret = snp_mark_unmergable(kvm, params.uaddr, params.len);
+	mmap_write_unlock(kvm->mm);
+	if (ret)
+		return -EFAULT;
+
 	/*
 	 * The userspace memory is already locked so technically we don't
 	 * need to lock it again. Later part of the function needs to know
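
For completeness, snp_launch_update() above sits behind the usual SEV command ioctl path. A rough userspace sketch of driving it is below; note that KVM_SEV_SNP_LAUNCH_UPDATE and the layout of the launch-update structure beyond the uaddr/len fields the patch dereferences come from this series' uapi additions and are assumptions here, not something defined by this patch alone.

/*
 * Rough userspace sketch of reaching snp_launch_update() through
 * KVM_MEMORY_ENCRYPT_OP.  The command name and struct layout are
 * assumed from this patch series' uapi headers.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int snp_launch_update_region(int vm_fd, int sev_fd,
				    uint64_t uaddr, uint64_t len)
{
	struct kvm_sev_snp_launch_update update;
	struct kvm_sev_cmd cmd;

	memset(&update, 0, sizeof(update));
	update.uaddr = uaddr;	/* previously registered guest private memory */
	update.len = len;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_SNP_LAUNCH_UPDATE;
	cmd.data = (uintptr_t)&update;
	cmd.sev_fd = sev_fd;

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}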