
[Resend,RFC,V2,06/12] HV/Vmbus: Add SNP support for VMbus channel initiate message

Message ID 20210414144945.3460554-7-ltykernel@gmail.com
State Superseded
Series x86/Hyper-V: Add Hyper-V Isolation VM support

Commit Message

Tianyu Lan April 14, 2021, 2:49 p.m. UTC
From: Tianyu Lan <Tianyu.Lan@microsoft.com>

The physical addresses of the monitor pages in the
CHANNELMSG_INITIATE_CONTACT message should be in the extra address
space for SNP support, and these pages also need to be accessed via
the extra address space inside the Linux guest. Remap them into that
address space with the ioremap function.
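
For reference, a minimal sketch of the resulting access pattern,
condensed from the connection.c hunks below (the helper name and its
page parameter are illustrative only and not part of this patch):

#include <linux/io.h>		/* ioremap_cache(), virt_to_phys() */
#include <asm/mshyperv.h>	/* ms_hyperv, hv_isolation_type_snp() */

/*
 * In an SNP Isolation VM the monitor page is exposed to the host at its
 * GPA plus ms_hyperv.shared_gpa_boundary (the "extra address space"),
 * and the guest must also access it through a mapping of that offset
 * address rather than through the original direct-map address.
 */
static void __iomem *hv_map_monitor_page_snp(void *page)
{
	phys_addr_t gpa = virt_to_phys(page);

	if (hv_isolation_type_snp())
		gpa += ms_hyperv.shared_gpa_boundary;

	return ioremap_cache(gpa, HV_HYP_PAGE_SIZE);
}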

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
---
 drivers/hv/connection.c   | 62 +++++++++++++++++++++++++++++++++++++++
 drivers/hv/hyperv_vmbus.h |  1 +
 2 files changed, 63 insertions(+)

Comments

Konrad Rzeszutek Wilk April 15, 2021, 6:52 p.m. UTC | #1
On Wed, Apr 14, 2021 at 10:49:39AM -0400, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@microsoft.com>
>
> The physical address of monitor pages in the CHANNELMSG_INITIATE_CONTACT
> msg should be in the extra address space for SNP support and these

What is this 'extra address space'? Is that just normal virtual address
space of the Linux kernel?

> pages also should be accessed via the extra address space inside Linux
> guest and remap the extra address by ioremap function.

OK, why do you need to use ioremap on them? Why not use vmap for
example? What is it that makes ioremap the right candidate?






Patch

diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 79bca653dce9..a0be9c11d737 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -101,6 +101,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 
 	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
 	msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+	if (hv_isolation_type_snp()) {
+		msg->monitor_page1 += ms_hyperv.shared_gpa_boundary;
+		msg->monitor_page2 += ms_hyperv.shared_gpa_boundary;
+	}
+
 	msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
 
 	/*
@@ -145,6 +151,29 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 		return -ECONNREFUSED;
 	}
 
+	if (hv_isolation_type_snp()) {
+		vmbus_connection.monitor_pages_va[0]
+			= vmbus_connection.monitor_pages[0];
+		vmbus_connection.monitor_pages[0]
+			= ioremap_cache(msg->monitor_page1, HV_HYP_PAGE_SIZE);
+		if (!vmbus_connection.monitor_pages[0])
+			return -ENOMEM;
+
+		vmbus_connection.monitor_pages_va[1]
+			= vmbus_connection.monitor_pages[1];
+		vmbus_connection.monitor_pages[1]
+			= ioremap_cache(msg->monitor_page2, HV_HYP_PAGE_SIZE);
+		if (!vmbus_connection.monitor_pages[1]) {
+			vunmap(vmbus_connection.monitor_pages[0]);
+			return -ENOMEM;
+		}
+
+		memset(vmbus_connection.monitor_pages[0], 0x00,
+		       HV_HYP_PAGE_SIZE);
+		memset(vmbus_connection.monitor_pages[1], 0x00,
+		       HV_HYP_PAGE_SIZE);
+	}
+
 	return ret;
 }
 
@@ -156,6 +185,7 @@ int vmbus_connect(void)
 	struct vmbus_channel_msginfo *msginfo = NULL;
 	int i, ret = 0;
 	__u32 version;
+	u64 pfn[2];
 
 	/* Initialize the vmbus connection */
 	vmbus_connection.conn_state = CONNECTING;
@@ -213,6 +243,16 @@ int vmbus_connect(void)
 		goto cleanup;
 	}
 
+	if (hv_isolation_type_snp()) {
+		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+		if (hv_mark_gpa_visibility(2, pfn,
+				VMBUS_PAGE_VISIBLE_READ_WRITE)) {
+			ret = -EFAULT;
+			goto cleanup;
+		}
+	}
+
 	msginfo = kzalloc(sizeof(*msginfo) +
 			  sizeof(struct vmbus_channel_initiate_contact),
 			  GFP_KERNEL);
@@ -279,6 +319,8 @@ int vmbus_connect(void)
 
 void vmbus_disconnect(void)
 {
+	u64 pfn[2];
+
 	/*
 	 * First send the unload request to the host.
 	 */
@@ -298,6 +340,26 @@ void vmbus_disconnect(void)
 		vmbus_connection.int_page = NULL;
 	}
 
+	if (hv_isolation_type_snp()) {
+		if (vmbus_connection.monitor_pages_va[0]) {
+			vunmap(vmbus_connection.monitor_pages[0]);
+			vmbus_connection.monitor_pages[0]
+				= vmbus_connection.monitor_pages_va[0];
+			vmbus_connection.monitor_pages_va[0] = NULL;
+		}
+
+		if (vmbus_connection.monitor_pages_va[1]) {
+			vunmap(vmbus_connection.monitor_pages[1]);
+			vmbus_connection.monitor_pages[1]
+				= vmbus_connection.monitor_pages_va[1];
+			vmbus_connection.monitor_pages_va[1] = NULL;
+		}
+
+		pfn[0] = virt_to_hvpfn(vmbus_connection.monitor_pages[0]);
+		pfn[1] = virt_to_hvpfn(vmbus_connection.monitor_pages[1]);
+		hv_mark_gpa_visibility(2, pfn, VMBUS_PAGE_NOT_VISIBLE);
+	}
+
 	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
 	hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
 	vmbus_connection.monitor_pages[0] = NULL;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 9416e09ebd58..0778add21a9c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -240,6 +240,7 @@ struct vmbus_connection {
 	 * is child->parent notification
 	 */
 	struct hv_monitor_page *monitor_pages[2];
+	void *monitor_pages_va[2];
 	struct list_head chn_msg_list;
 	spinlock_t channelmsg_lock;