[RFC,Part2,03/30] x86: add helper functions for RMPUPDATE and PSMASH instruction

Message ID 20210324170436.31843-4-brijesh.singh@amd.com
State New
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Brijesh Singh March 24, 2021, 5:04 p.m. UTC
The RMPUPDATE instruction writes a new RMP entry in the RMP table. The
hypervisor will use the instruction to add pages to the RMP table. See
APM3 for details on the instruction operations.

The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
contiguous 4KB-page RMP entries. The hypervisor will use this instruction
to adjust the RMP entry without invalidating the previous RMP entry.
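
As an illustration only (the wrapper below and its parameters are
hypothetical, not part of this patch), a hypervisor-side caller might
drive the RMPUPDATE helper like so, using the struct rmpupdate layout
added in this patch; the field encodings (pagesize 0 = 4KB, 1 = 2MB)
are assumptions based on the definitions here:

	/* Hypothetical example: assign a 4KB page to a guest. */
	static int snp_assign_page_to_guest(struct page *page, u64 gpa, u32 asid)
	{
		struct rmpupdate e = {
			.gpa      = gpa,	/* guest physical address of the page */
			.assigned = 1,		/* mark the page guest-owned */
			.pagesize = 0,		/* assumed: 0 = 4KB, 1 = 2MB */
			.asid     = asid,	/* ASID of the owning guest */
		};

		return rmptable_rmpupdate(page, &e);
	}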

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev-snp.h | 27 ++++++++++++++++++++++
 arch/x86/mm/mem_encrypt.c      | 41 ++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)

Comments

Borislav Petkov April 15, 2021, 6 p.m. UTC | #1
On Wed, Mar 24, 2021 at 12:04:09PM -0500, Brijesh Singh wrote:
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 06394b6d56b2..7a0138cb3e17 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -644,3 +644,44 @@ rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
>  	return entry;
>  }
>  EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
> +
> +int rmptable_psmash(struct page *page)

psmash() should be enough like all those other wrappers around insns.

> +{
> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!static_branch_unlikely(&snp_enable_key))
> +		return -ENXIO;
> +
> +	/* Retry if another processor is modifying the RMP entry. */

Also, a comment here should say which binutils version supports the
insn mnemonic so that it can be converted to "psmash" later. Ditto for
rmpupdate below.

Looking at the binutils repo, it looks like since version 2.36.

/me rebuilds objdump...

> +	do {
> +		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
> +			      : "=a"(ret)
> +			      : "a"(spa)
> +			      : "memory", "cc");
> +	} while (ret == PSMASH_FAIL_INUSE);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(rmptable_psmash);
> +
> +int rmptable_rmpupdate(struct page *page, struct rmpupdate *val)

rmpupdate()

> +{
> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
> +	bool flush = true;
> +	int ret;
> +
> +	if (!static_branch_unlikely(&snp_enable_key))
> +		return -ENXIO;
> +
> +	/* Retry if another processor is modifying the RMP entry. */
> +	do {
> +		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> +			     : "=a"(ret)
> +			     : "a"(spa), "c"((unsigned long)val), "d"(flush)
				    ^^^^^^^^^^^^^^^

what's the cast for?

"d"(flush)?

There's nothing in the APM talking about RMPUPDATE taking an input arg
in %rdx?

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
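
Concretely, the annotation being asked for could be a one-line comment
above each opcode, e.g. for psmash (a sketch only; the exact wording is
up to the next revision):

	/* Retry if another processor is modifying the RMP entry. */
	do {
		/* Binutils version 2.36 supports the PSMASH mnemonic. */
		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
			     : "=a"(ret)
			     : "a"(spa)
			     : "memory", "cc");
	} while (ret == PSMASH_FAIL_INUSE);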
Brijesh Singh April 15, 2021, 6:15 p.m. UTC | #2
On 4/15/21 1:00 PM, Borislav Petkov wrote:
> On Wed, Mar 24, 2021 at 12:04:09PM -0500, Brijesh Singh wrote:
>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
>> index 06394b6d56b2..7a0138cb3e17 100644
>> --- a/arch/x86/mm/mem_encrypt.c
>> +++ b/arch/x86/mm/mem_encrypt.c
>> @@ -644,3 +644,44 @@ rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
>>  	return entry;
>>  }
>>  EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
>> +
>> +int rmptable_psmash(struct page *page)
>
> psmash() should be enough like all those other wrappers around insns.

Noted.

>> +{
>> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
>> +	int ret;
>> +
>> +	if (!static_branch_unlikely(&snp_enable_key))
>> +		return -ENXIO;
>> +
>> +	/* Retry if another processor is modifying the RMP entry. */
>
> Also, a comment here should say which binutils version supports the
> insn mnemonic so that it can be converted to "psmash" later. Ditto for
> rmpupdate below.
>
> Looking at the binutils repo, it looks like since version 2.36.
>
> /me rebuilds objdump...

Sure, I will add a comment.

>> +	do {
>> +		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
>> +			      : "=a"(ret)
>> +			      : "a"(spa)
>> +			      : "memory", "cc");
>> +	} while (ret == PSMASH_FAIL_INUSE);
>> +
>> +	return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(rmptable_psmash);
>> +
>> +int rmptable_rmpupdate(struct page *page, struct rmpupdate *val)
>
> rmpupdate()
>
>> +{
>> +	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
>> +	bool flush = true;
>> +	int ret;
>> +
>> +	if (!static_branch_unlikely(&snp_enable_key))
>> +		return -ENXIO;
>> +
>> +	/* Retry if another processor is modifying the RMP entry. */
>> +	do {
>> +		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
>> +			     : "=a"(ret)
>> +			     : "a"(spa), "c"((unsigned long)val), "d"(flush)
> 					    ^^^^^^^^^^^^^^^
>
> what's the cast for?

No need to cast it. I will drop it in the next round.

> "d"(flush)?

Hmm, either this was copied from pvalidate or an old internal APM may
have had the flush. I will fix it in the next rev. Thanks for pointing
it out.

> There's nothing in the APM talking about RMPUPDATE taking an input arg
> in %rdx?
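
Taken together, the feedback points at a next revision that renames the
helper to rmpupdate(), drops the cast, and drops the undocumented %rdx
input. A sketch of that shape (illustrative only; the final form is up
to the next posting):

	int rmpupdate(struct page *page, struct rmpupdate *val)
	{
		unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
		int ret;

		if (!static_branch_unlikely(&snp_enable_key))
			return -ENXIO;

		/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
		/* Retry if another processor is modifying the RMP entry. */
		do {
			asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
				     : "=a"(ret)
				     : "a"(spa), "c"(val)
				     : "memory", "cc");
		} while (ret == RMPUPDATE_FAIL_INUSE);

		return ret;
	}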

Patch

diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h
index 2aa14b38c5ed..199d88a38c76 100644
--- a/arch/x86/include/asm/sev-snp.h
+++ b/arch/x86/include/asm/sev-snp.h
@@ -96,6 +96,29 @@  typedef struct rmpentry rmpentry_t;
 #define rmpentry_gpa(x)		((unsigned long)(x)->info.gpa)
 #define rmpentry_immutable(x)	((x)->info.immutable)
 
+
+/* Return code of RMPUPDATE */
+#define RMPUPDATE_SUCCESS		0
+#define RMPUPDATE_FAIL_INPUT		1
+#define RMPUPDATE_FAIL_PERMISSION	2
+#define RMPUPDATE_FAIL_INUSE		3
+#define RMPUPDATE_FAIL_OVERLAP		4
+
+struct rmpupdate {
+	u64 gpa;
+	u8 assigned;
+	u8 pagesize;
+	u8 immutable;
+	u8 rsvd;
+	u32 asid;
+} __packed;
+
+/* Return code of PSMASH */
+#define PSMASH_FAIL_INPUT		1
+#define PSMASH_FAIL_PERMISSION		2
+#define PSMASH_FAIL_INUSE		3
+#define PSMASH_FAIL_BADADDR		4
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #include <linux/jump_label.h>
 
@@ -124,6 +147,8 @@  void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 int snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 int snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level);
+int rmptable_psmash(struct page *page);
+int rmptable_rmpupdate(struct page *page, struct rmpupdate *e);
 
 extern struct static_key_false snp_enable_key;
 static inline bool snp_key_active(void)
@@ -155,6 +180,8 @@  static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int npages
 static inline int snp_set_memory_private(unsigned long vaddr, unsigned int npages) { return 0; }
 static inline bool snp_key_active(void) { return false; }
 static inline rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level) { return NULL; }
+static inline int rmptable_psmash(struct page *page) { return -ENXIO; }
+static inline int rmptable_rmpupdate(struct page *page, struct rmpupdate *e) { return -ENXIO; }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 06394b6d56b2..7a0138cb3e17 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -644,3 +644,44 @@  rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
 	return entry;
 }
 EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
+
+int rmptable_psmash(struct page *page)
+{
+	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
+	int ret;
+
+	if (!static_branch_unlikely(&snp_enable_key))
+		return -ENXIO;
+
+	/* Retry if another processor is modifying the RMP entry. */
+	do {
+		asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
+			      : "=a"(ret)
+			      : "a"(spa)
+			      : "memory", "cc");
+	} while (ret == PSMASH_FAIL_INUSE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rmptable_psmash);
+
+int rmptable_rmpupdate(struct page *page, struct rmpupdate *val)
+{
+	unsigned long spa = page_to_pfn(page) << PAGE_SHIFT;
+	bool flush = true;
+	int ret;
+
+	if (!static_branch_unlikely(&snp_enable_key))
+		return -ENXIO;
+
+	/* Retry if another processor is modifying the RMP entry. */
+	do {
+		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
+			     : "=a"(ret)
+			     : "a"(spa), "c"((unsigned long)val), "d"(flush)
+			     : "memory", "cc");
+	} while (ret == RMPUPDATE_FAIL_INUSE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rmptable_rmpupdate);
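
For completeness, a hypothetical caller of the PSMASH helper, e.g. when
a 2MB RMP entry must be split so a guest page can be remapped at 4KB
granularity (everything but rmptable_psmash() itself is invented for
illustration):

	/* Split the 2MB RMP entry backing @page into 512 4KB entries. */
	static int snp_split_large_rmp_entry(struct page *page)
	{
		int ret;

		ret = rmptable_psmash(page);
		if (ret)
			pr_err("psmash failed, ret %d\n", ret);

		return ret;
	}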