
[V4,07/13] hyperv/Vmbus: Add SNP support for VMbus channel initiate message

Message ID 20210827172114.414281-8-ltykernel@gmail.com
State New
Series x86/Hyper-V: Add Hyper-V Isolation VM support

Commit Message

Tianyu Lan Aug. 27, 2021, 5:21 p.m. UTC
From: Tianyu Lan <Tianyu.Lan@microsoft.com>

The monitor pages in the CHANNELMSG_INITIATE_CONTACT msg are shared
with the host in an Isolation VM, so a hvcall is needed to make them
visible to the host. In an Isolation VM with AMD SEV-SNP, the host must
access them via addresses above the shared GPA boundary, so remap these
pages at the extra address (pa + shared_gpa_boundary).

Introduce monitor_pages_original[] in struct vmbus_connection to store
the monitor page virtual addresses returned by
hv_alloc_hyperv_zeroed_page(), and free the monitor pages via
monitor_pages_original in vmbus_disconnect(). monitor_pages[] is used
to access the monitor pages and is initialized to the same values as
monitor_pages_original. In an SNP Isolation VM, monitor_pages[] is
overridden with the VAs of the remapped extra addresses.
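
Condensed from the diff below, the SNP-specific handling amounts to the
following (monitor page 0 shown; monitor page 1 is handled the same way,
and the ordering around the host's reply is omitted here):

	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
	if (hv_isolation_type_snp()) {
		/* Advertise the GPA above the shared boundary to the host */
		msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
		/* Remap the page at the extra address for guest access */
		vmbus_connection.monitor_pages[0] =
			memremap(msg->monitor_page1, HV_HYP_PAGE_SIZE,
				 MEMREMAP_WB);
	}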

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
---
Change since v3:
	* Rename monitor_pages_va to monitor_pages_original.
	* Free the monitor pages via monitor_pages_original;
	  monitor_pages is used to access the monitor pages.

Change since v1:
	* Do not remap the monitor pages in non-SNP Isolation VMs.
---
 drivers/hv/connection.c   | 75 ++++++++++++++++++++++++++++++++++++---
 drivers/hv/hyperv_vmbus.h |  1 +
 2 files changed, 72 insertions(+), 4 deletions(-)

Comments

Michael Kelley Sept. 2, 2021, 12:21 a.m. UTC | #1
From: Tianyu Lan <ltykernel@gmail.com> Sent: Friday, August 27, 2021 10:21 AM
> 

Subject line tag should be "Drivers: hv: vmbus:"

> The monitor pages in the CHANNELMSG_INITIATE_CONTACT msg are shared
> with the host in an Isolation VM, so a hvcall is needed to make them
> visible to the host. In an Isolation VM with AMD SEV-SNP, the host must
> access them via addresses above the shared GPA boundary, so remap these
> pages at the extra address (pa + shared_gpa_boundary).
> 
> Introduce monitor_pages_original[] in struct vmbus_connection to store
> the monitor page virtual addresses returned by
> hv_alloc_hyperv_zeroed_page(), and free the monitor pages via
> monitor_pages_original in vmbus_disconnect(). monitor_pages[] is used
> to access the monitor pages and is initialized to the same values as
> monitor_pages_original. In an SNP Isolation VM, monitor_pages[] is
> overridden with the VAs of the remapped extra addresses.
> 
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
> Change since v3:
> 	* Rename monitor_pages_va to monitor_pages_original.
> 	* Free the monitor pages via monitor_pages_original;
> 	  monitor_pages is used to access the monitor pages.
> 
> Change since v1:
> 	* Do not remap the monitor pages in non-SNP Isolation VMs.
> ---
>  drivers/hv/connection.c   | 75 ++++++++++++++++++++++++++++++++++++---
>  drivers/hv/hyperv_vmbus.h |  1 +
>  2 files changed, 72 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
> index 6d315c1465e0..9a48d8115c87 100644
> --- a/drivers/hv/connection.c
> +++ b/drivers/hv/connection.c
> @@ -19,6 +19,7 @@
>  #include <linux/vmalloc.h>
>  #include <linux/hyperv.h>
>  #include <linux/export.h>
> +#include <linux/io.h>
>  #include <asm/mshyperv.h>
> 
>  #include "hyperv_vmbus.h"
> @@ -104,6 +105,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
> 
>  	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
>  	msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
> +
> +	if (hv_isolation_type_snp()) {
> +		msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
> +		msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
> +	}
> +
>  	msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
> 
>  	/*
> @@ -148,6 +155,35 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
>  		return -ECONNREFUSED;
>  	}
> 
> +
> +	if (hv_is_isolation_supported()) {
> +		if (hv_isolation_type_snp()) {
> +			vmbus_connection.monitor_pages[0]
> +				= memremap(msg->monitor_page1, HV_HYP_PAGE_SIZE,
> +					   MEMREMAP_WB);
> +			if (!vmbus_connection.monitor_pages[0])
> +				return -ENOMEM;
> +
> +			vmbus_connection.monitor_pages[1]
> +				= memremap(msg->monitor_page2, HV_HYP_PAGE_SIZE,
> +					   MEMREMAP_WB);
> +			if (!vmbus_connection.monitor_pages[1]) {
> +				memunmap(vmbus_connection.monitor_pages[0]);
> +				return -ENOMEM;
> +			}
> +		}
> +
> +		/*
> +		 * The hvcall that sets memory host visibility smears the
> +		 * memory, so zero the monitor pages here.
> +		 */
> +		memset(vmbus_connection.monitor_pages[0], 0x00,
> +		       HV_HYP_PAGE_SIZE);
> +		memset(vmbus_connection.monitor_pages[1], 0x00,
> +		       HV_HYP_PAGE_SIZE);
> +
> +	}

I still find it somewhat confusing to have the handling of the
shared_gpa_boundary and memory mapping in the function for
negotiating the VMbus version.  I think the code works as written,
but it would seem cleaner and easier to understand to precompute
the physical addresses and do all the mapping and memory zero'ing
in a single place in vmbus_connect().  Then the negotiate version
function can focus on doing only the version negotiation.
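
For example, something along these lines (untested sketch;
monitor_pages_pa[] would be a new array in struct vmbus_connection
holding the precomputed physical addresses, and error handling is
omitted):

	/*
	 * In vmbus_connect(), after the monitor pages are allocated and
	 * (in an Isolation VM) made visible to the host:
	 */
	vmbus_connection.monitor_pages_pa[0] =
		virt_to_phys(vmbus_connection.monitor_pages[0]);
	if (hv_is_isolation_supported()) {
		if (hv_isolation_type_snp()) {
			vmbus_connection.monitor_pages_pa[0] +=
				ms_hyperv.shared_gpa_boundary;
			vmbus_connection.monitor_pages[0] =
				memremap(vmbus_connection.monitor_pages_pa[0],
					 HV_HYP_PAGE_SIZE, MEMREMAP_WB);
		}
		memset(vmbus_connection.monitor_pages[0], 0x00,
		       HV_HYP_PAGE_SIZE);
	}
	/* ... and the same for monitor_pages[1] ... */

vmbus_negotiate_version() would then only need:

	msg->monitor_page1 = vmbus_connection.monitor_pages_pa[0];
	msg->monitor_page2 = vmbus_connection.monitor_pages_pa[1];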

> +
>  	return ret;
>  }
> 
> @@ -159,6 +195,7 @@ int vmbus_connect(void)
>  	struct vmbus_channel_msginfo *msginfo = NULL;
>  	int i, ret = 0;
>  	__u32 version;
> +	u64 pfn[2];
> 
>  	/* Initialize the vmbus connection */
>  	vmbus_connection.conn_state = CONNECTING;
> @@ -216,6 +253,21 @@ int vmbus_connect(void)
>  		goto cleanup;
>  	}
> 
> +	vmbus_connection.monitor_pages_original[0]
> +		= vmbus_connection.monitor_pages[0];
> +	vmbus_connection.monitor_pages_original[1]
> +		= vmbus_connection.monitor_pages[1];
> +
> +	if (hv_is_isolation_supported()) {
> +		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
> +		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
> +		if (hv_mark_gpa_visibility(2, pfn,
> +				VMBUS_PAGE_VISIBLE_READ_WRITE)) {

In Patch 4 of this series, host visibility for the specified buffer is done
by calling set_memory_decrypted()/set_memory_encrypted().  Could
the same be done here?   The code would be more consistent overall
with better encapsulation.  hv_mark_gpa_visibility() would not need to
be exported or need an ARM64 stub.

set_memory_decrypted()/encrypted() seem to be the primary functions
that should be used for this purpose, and they already have the
appropriate stubs for architectures that don't support memory encryption.
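
That would look something like this in vmbus_connect() (sketch only,
not tested; each monitor page is a single HV_HYP_PAGE_SIZE page):

	if (hv_is_isolation_supported()) {
		ret = set_memory_decrypted((unsigned long)
				vmbus_connection.monitor_pages[0], 1);
		if (!ret)
			ret = set_memory_decrypted((unsigned long)
					vmbus_connection.monitor_pages[1], 1);
		if (ret)
			goto cleanup;
	}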

> +			ret = -EFAULT;
> +			goto cleanup;
> +		}
> +	}
> +
>  	msginfo = kzalloc(sizeof(*msginfo) +
>  			  sizeof(struct vmbus_channel_initiate_contact),
>  			  GFP_KERNEL);
> @@ -284,6 +336,8 @@ int vmbus_connect(void)
> 
>  void vmbus_disconnect(void)
>  {
> +	u64 pfn[2];
> +
>  	/*
>  	 * First send the unload request to the host.
>  	 */
> @@ -303,10 +357,23 @@ void vmbus_disconnect(void)
>  		vmbus_connection.int_page = NULL;
>  	}
> 
> -	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
> -	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
> -	vmbus_connection.monitor_pages[0] = NULL;
> -	vmbus_connection.monitor_pages[1] = NULL;
> +	if (hv_is_isolation_supported()) {
> +		memunmap(vmbus_connection.monitor_pages[0]);
> +		memunmap(vmbus_connection.monitor_pages[1]);
> +
> +		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
> +		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
> +		hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);

Same comment about using set_memory_encrypted() instead.
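
i.e. re-encrypt the pages before they are freed, operating on the
originally allocated addresses (sketch only):

	if (hv_is_isolation_supported()) {
		set_memory_encrypted((unsigned long)
			vmbus_connection.monitor_pages_original[0], 1);
		set_memory_encrypted((unsigned long)
			vmbus_connection.monitor_pages_original[1], 1);
	}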

> +	}
> +
> +	hv_free_hyperv_page((unsigned long)
> +		vmbus_connection.monitor_pages_original[0]);
> +	hv_free_hyperv_page((unsigned long)
> +		vmbus_connection.monitor_pages_original[1]);
> +	vmbus_connection.monitor_pages_original[0] =
> +		vmbus_connection.monitor_pages[0] = NULL;
> +	vmbus_connection.monitor_pages_original[1] =
> +		vmbus_connection.monitor_pages[1] = NULL;
>  }
> 
>  /*
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 42f3d9d123a1..7cb11ef694da 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -240,6 +240,7 @@ struct vmbus_connection {
>  	 * is child->parent notification
>  	 */
>  	struct hv_monitor_page *monitor_pages[2];
> +	void *monitor_pages_original[2];
>  	struct list_head chn_msg_list;
>  	spinlock_t channelmsg_lock;
> 
> --
> 2.25.1

Patch

diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6d315c1465e0..9a48d8115c87 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -19,6 +19,7 @@ 
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
 #include <linux/export.h>
+#include <linux/io.h>
 #include <asm/mshyperv.h>
 
 #include "hyperv_vmbus.h"
@@ -104,6 +105,12 @@  int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 
 	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
 	msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+	if (hv_isolation_type_snp()) {
+		msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
+		msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
+	}
+
 	msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
 
 	/*
@@ -148,6 +155,35 @@  int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 		return -ECONNREFUSED;
 	}
 
+
+	if (hv_is_isolation_supported()) {
+		if (hv_isolation_type_snp()) {
+			vmbus_connection.monitor_pages[0]
+				= memremap(msg->monitor_page1, HV_HYP_PAGE_SIZE,
+					   MEMREMAP_WB);
+			if (!vmbus_connection.monitor_pages[0])
+				return -ENOMEM;
+
+			vmbus_connection.monitor_pages[1]
+				= memremap(msg->monitor_page2, HV_HYP_PAGE_SIZE,
+					   MEMREMAP_WB);
+			if (!vmbus_connection.monitor_pages[1]) {
+				memunmap(vmbus_connection.monitor_pages[0]);
+				return -ENOMEM;
+			}
+		}
+
+		/*
+		 * The hvcall that sets memory host visibility smears the
+		 * memory, so zero the monitor pages here.
+		 */
+		memset(vmbus_connection.monitor_pages[0], 0x00,
+		       HV_HYP_PAGE_SIZE);
+		memset(vmbus_connection.monitor_pages[1], 0x00,
+		       HV_HYP_PAGE_SIZE);
+
+	}
+
 	return ret;
 }
 
@@ -159,6 +195,7 @@  int vmbus_connect(void)
 	struct vmbus_channel_msginfo *msginfo = NULL;
 	int i, ret = 0;
 	__u32 version;
+	u64 pfn[2];
 
 	/* Initialize the vmbus connection */
 	vmbus_connection.conn_state = CONNECTING;
@@ -216,6 +253,21 @@  int vmbus_connect(void)
 		goto cleanup;
 	}
 
+	vmbus_connection.monitor_pages_original[0]
+		= vmbus_connection.monitor_pages[0];
+	vmbus_connection.monitor_pages_original[1]
+		= vmbus_connection.monitor_pages[1];
+
+	if (hv_is_isolation_supported()) {
+		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+		if (hv_mark_gpa_visibility(2, pfn,
+				VMBUS_PAGE_VISIBLE_READ_WRITE)) {
+			ret = -EFAULT;
+			goto cleanup;
+		}
+	}
+
 	msginfo = kzalloc(sizeof(*msginfo) +
 			  sizeof(struct vmbus_channel_initiate_contact),
 			  GFP_KERNEL);
@@ -284,6 +336,8 @@  int vmbus_connect(void)
 
 void vmbus_disconnect(void)
 {
+	u64 pfn[2];
+
 	/*
 	 * First send the unload request to the host.
 	 */
@@ -303,10 +357,23 @@  void vmbus_disconnect(void)
 		vmbus_connection.int_page = NULL;
 	}
 
-	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
-	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
-	vmbus_connection.monitor_pages[0] = NULL;
-	vmbus_connection.monitor_pages[1] = NULL;
+	if (hv_is_isolation_supported()) {
+		memunmap(vmbus_connection.monitor_pages[0]);
+		memunmap(vmbus_connection.monitor_pages[1]);
+
+		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+		hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);
+	}
+
+	hv_free_hyperv_page((unsigned long)
+		vmbus_connection.monitor_pages_original[0]);
+	hv_free_hyperv_page((unsigned long)
+		vmbus_connection.monitor_pages_original[1]);
+	vmbus_connection.monitor_pages_original[0] =
+		vmbus_connection.monitor_pages[0] = NULL;
+	vmbus_connection.monitor_pages_original[1] =
+		vmbus_connection.monitor_pages[1] = NULL;
 }
 
 /*
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..7cb11ef694da 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -240,6 +240,7 @@  struct vmbus_connection {
 	 * is child->parent notification
 	 */
 	struct hv_monitor_page *monitor_pages[2];
+	void *monitor_pages_original[2];
 	struct list_head chn_msg_list;
 	spinlock_t channelmsg_lock;