
[Xen-devel] swiotlb-xen: implement xen_swiotlb_dma_mmap callback

Message ID 1396852323-11814-1-git-send-email-oleksandr.dmytryshyn@globallogic.com
State New

Commit Message

Oleksandr Dmytryshyn April 7, 2014, 6:32 a.m. UTC
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

This function creates a userspace mapping for DMA-coherent memory.

Change-Id: If22f75996278063394f628f57869ccd3ca09de7c
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Oleksandr Dmytryshyn <oleksandr.dmytryshyn@globallogic.com>
---
 arch/arm/xen/mm.c         |  1 +
 drivers/xen/swiotlb-xen.c | 36 ++++++++++++++++++++++++++++++++++++
 include/xen/swiotlb-xen.h |  5 +++++
 3 files changed, 42 insertions(+)
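
[Editor's note] Drivers do not call this hook directly: the usual path is to allocate a buffer with dma_alloc_coherent() and then forward a userspace mmap() request through dma_mmap_coherent(), which dispatches to the .mmap member of the device's dma_map_ops — on Xen/ARM, the callback added by this patch. A minimal sketch of that driver-side wiring (the my_* names are hypothetical, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state, assumed to have been set up elsewhere by a
 * dma_alloc_coherent() call on a device using the Xen swiotlb DMA ops. */
static struct device *my_dev;
static void *my_cpu_addr;
static dma_addr_t my_dma_handle;
static size_t my_buf_size;

static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * dma_mmap_coherent() dispatches to dma_map_ops->mmap, i.e. to
	 * xen_swiotlb_dma_mmap() once this patch is applied on Xen/ARM.
	 */
	return dma_mmap_coherent(my_dev, vma, my_cpu_addr, my_dma_handle,
				 my_buf_size);
}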

Comments

Stefano Stabellini April 7, 2014, 9:47 a.m. UTC | #1
On Mon, 7 Apr 2014, Oleksandr Dmytryshyn wrote:
> From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> 
> This function creates a userspace mapping for DMA-coherent memory.
> 
> Change-Id: If22f75996278063394f628f57869ccd3ca09de7c
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> Signed-off-by: Oleksandr Dmytryshyn <oleksandr.dmytryshyn@globallogic.com>

Much better, thank you.
Given that Xen needs MMU support, I think we can safely get rid of the
ifdef CONFIG_MMU.
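
[Editor's note] For illustration only, with the CONFIG_MMU guard dropped as suggested above, the callback could look like the sketch below — the body is identical to the patch, minus the #ifdef (untested):

int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *cpu_addr, dma_addr_t dma_addr, size_t size,
			 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PFN_DOWN(xen_bus_to_phys(dma_addr));
	unsigned long off = vma->vm_pgoff;

	/* Honour DMA_ATTR_WRITE_COMBINE; otherwise use a coherent mapping. */
	vma->vm_page_prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(vma->vm_page_prot) :
			    pgprot_dmacoherent(vma->vm_page_prot);

	/* Buffers from a per-device coherent pool are handled by the core. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Refuse requests that would map past the underlying allocation. */
	if (off < nr_pages && nr_vma_pages <= (nr_pages - off))
		ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);

	return ret;
}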


>  arch/arm/xen/mm.c         |  1 +
>  drivers/xen/swiotlb-xen.c | 36 ++++++++++++++++++++++++++++++++++++
>  include/xen/swiotlb-xen.h |  5 +++++
>  3 files changed, 42 insertions(+)
> 
> diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
> index b0e77de..91408b1 100644
> --- a/arch/arm/xen/mm.c
> +++ b/arch/arm/xen/mm.c
> @@ -48,6 +48,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
>  	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
>  	.map_sg = xen_swiotlb_map_sg_attrs,
>  	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
> +	.mmap = xen_swiotlb_dma_mmap,
>  	.map_page = xen_swiotlb_map_page,
>  	.unmap_page = xen_swiotlb_unmap_page,
>  	.dma_supported = xen_swiotlb_dma_supported,
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index 5403855..acf0c06 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -407,6 +407,42 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
>  EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
>  
>  /*
> + * Create userspace mapping for the DMA-coherent memory.
> + */
> +int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
> +			 void *cpu_addr, dma_addr_t dma_addr, size_t size,
> +			 struct dma_attrs *attrs)
> +{
> +	int ret = -ENXIO;
> +#ifdef CONFIG_MMU
> +	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
> +	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
> +	unsigned long pfn = PFN_DOWN(xen_bus_to_phys(dma_addr));
> +	unsigned long off = vma->vm_pgoff;
> +	pgprot_t prot = vma->vm_page_prot;
> +
> +	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
> +			    pgprot_writecombine(prot) :
> +			    pgprot_dmacoherent(prot);
> +
> +	vma->vm_page_prot = prot;
> +
> +	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
> +		return ret;
> +
> +	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
> +		ret = remap_pfn_range(vma, vma->vm_start,
> +					pfn + off,
> +					vma->vm_end - vma->vm_start,
> +					vma->vm_page_prot);
> +	}
> +#endif  /* CONFIG_MMU */
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
> +
> +/*
>   * Unmap a single streaming mode DMA translation.  The dma_addr and size must
>   * match what was provided for in a previous xen_swiotlb_map_page call.  All
>   * other usages are undefined.
> diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
> index 7b64465..930fa94 100644
> --- a/include/xen/swiotlb-xen.h
> +++ b/include/xen/swiotlb-xen.h
> @@ -15,6 +15,11 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
>  			  void *vaddr, dma_addr_t dma_handle,
>  			  struct dma_attrs *attrs);
>  
> +extern int
> +xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
> +		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
> +		     struct dma_attrs *attrs);
> +
>  extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
>  				       unsigned long offset, size_t size,
>  				       enum dma_data_direction dir,
> -- 
> 1.8.2.rc2
>

Patch

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de..91408b1 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -48,6 +48,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
 	.map_sg = xen_swiotlb_map_sg_attrs,
 	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.mmap = xen_swiotlb_dma_mmap,
 	.map_page = xen_swiotlb_map_page,
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 5403855..acf0c06 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -407,6 +407,42 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
 /*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			 struct dma_attrs *attrs)
+{
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
+	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = PFN_DOWN(xen_bus_to_phys(dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	pgprot_t prot = vma->vm_page_prot;
+
+	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+			    pgprot_writecombine(prot) :
+			    pgprot_dmacoherent(prot);
+
+	vma->vm_page_prot = prot;
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+					pfn + off,
+					vma->vm_end - vma->vm_start,
+					vma->vm_page_prot);
+	}
+#endif  /* CONFIG_MMU */
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+
+/*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
  * match what was provided for in a previous xen_swiotlb_map_page call.  All
  * other usages are undefined.
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 7b64465..930fa94 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -15,6 +15,11 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
 			  void *vaddr, dma_addr_t dma_handle,
 			  struct dma_attrs *attrs);
 
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		     struct dma_attrs *attrs);
+
 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				       unsigned long offset, size_t size,
 				       enum dma_data_direction dir,
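
[Editor's note] For completeness, the userspace side that exercises such a mapping is an ordinary mmap() on whatever device node the driver exposes. The device path and buffer size below are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 4096;				/* illustrative size */
	int fd = open("/dev/my-dma-device", O_RDWR);	/* hypothetical node */
	void *buf;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * The driver's mmap handler ends up in xen_swiotlb_dma_mmap(), which
	 * remaps the DMA-coherent buffer into this process.
	 */
	buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	memset(buf, 0, size);	/* the buffer is now directly accessible */

	munmap(buf, size);
	close(fd);
	return EXIT_SUCCESS;
}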