diff mbox series

[RFT,v3,07/21] x86/sev: Move MSR save/restore out of early page state change helper

Message ID 20250512190834.332684-30-ardb+git@google.com
State New
Headers show
Series x86: strict separation of startup code | expand

Commit Message

Ard Biesheuvel May 12, 2025, 7:08 p.m. UTC
From: Ard Biesheuvel <ardb@kernel.org>

The function __page_state_change() in the decompressor is very similar
to the loop in early_set_pages_state(), and they can share this code
once the MSR save/restore is moved out.

This also avoids unnecessarily preserving and restoring the GHCB MSR for
every individual page when changing the state of a longer sequence of pages.

This simplifies subsequent changes, where the APIs used by
__page_state_change() are modified for better separation between startup
code and ordinary code.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/boot/compressed/sev.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 70c3f4fc4349..bdedf4bd23ec 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -71,9 +71,6 @@  static void __page_state_change(unsigned long paddr, enum psc_op op)
 	if (op == SNP_PAGE_STATE_SHARED)
 		pvalidate_4k_page(paddr, paddr, false);
 
-	/* Save the current GHCB MSR value */
-	msr = sev_es_rd_ghcb_msr();
-
 	/* Issue VMGEXIT to change the page state in RMP table. */
 	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
 	VMGEXIT();
@@ -83,9 +80,6 @@  static void __page_state_change(unsigned long paddr, enum psc_op op)
 	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 
-	/* Restore the GHCB MSR value */
-	sev_es_wr_ghcb_msr(msr);
-
 	/*
 	 * Now that page state is changed in the RMP table, validate it so that it is
 	 * consistent with the RMP entry.
@@ -96,18 +90,26 @@  static void __page_state_change(unsigned long paddr, enum psc_op op)
 
 void snp_set_page_private(unsigned long paddr)
 {
+	u64 msr;
+
 	if (!sev_snp_enabled())
 		return;
 
+	msr = sev_es_rd_ghcb_msr();
 	__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
+	sev_es_wr_ghcb_msr(msr);
 }
 
 void snp_set_page_shared(unsigned long paddr)
 {
+	u64 msr;
+
 	if (!sev_snp_enabled())
 		return;
 
+	msr = sev_es_rd_ghcb_msr();
 	__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
+	sev_es_wr_ghcb_msr(msr);
 }
 
 bool early_setup_ghcb(void)
@@ -132,8 +134,11 @@  bool early_setup_ghcb(void)
 
 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 {
+	u64 msr = sev_es_rd_ghcb_msr();
+
 	for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
 		__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
+	sev_es_wr_ghcb_msr(msr);
 }
 
 void sev_es_shutdown_ghcb(void)